Changeset 610 for trunk/kernel
- Timestamp:
- Dec 27, 2018, 7:38:58 PM (6 years ago)
- Location:
- trunk/kernel
- Files:
-
- 44 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/Makefile
r590 r610 138 138 build/syscalls/sys_condvar.o \ 139 139 build/syscalls/sys_barrier.o \ 140 build/syscalls/sys_mutex.o 141 142 SYS_OBJS_1 = build/syscalls/sys_ exit.o\140 build/syscalls/sys_mutex.o 141 142 SYS_OBJS_1 = build/syscalls/sys_rename.o \ 143 143 build/syscalls/sys_munmap.o \ 144 144 build/syscalls/sys_open.o \ … … 183 183 build/syscalls/sys_fg.o \ 184 184 build/syscalls/sys_is_fg.o 185 186 SYS_OBJS_5 = build/syscalls/sys_exit.o 185 187 186 188 VFS_OBJS = build/fs/vfs.o \ … … 292 294 $(SYS_OBJS_3) \ 293 295 $(SYS_OBJS_4) \ 296 $(SYS_OBJS_5) \ 294 297 $(HAL_ARCH)/kernel.ld 295 298 $(LD) -o $@ -T $(HAL_ARCH)/kernel.ld $(LIBGCC) \ … … 297 300 $(LIBK_OBJS) $(DRIVERS_OBJS) $(VFS_OBJS) \ 298 301 $(SYS_OBJS_0) $(SYS_OBJS_1) $(SYS_OBJS_2) \ 299 $(SYS_OBJS_3) $(SYS_OBJS_4) -lgcc302 $(SYS_OBJS_3) $(SYS_OBJS_4) $(SYS_OBJS_5) -lgcc 300 303 $(DU) -D $@ > $@.txt 301 304 -
trunk/kernel/fs/devfs.c
r602 r610 93 93 xptr_t unused_xp; // required by vfs_add_child_in_parent() 94 94 95 // create sDEVFS "dev" inode in cluster 095 // create DEVFS "dev" inode in cluster 0 96 96 error = vfs_add_child_in_parent( 0, // cxy 97 97 INODE_TYPE_DIR, … … 102 102 devfs_dev_inode_xp ); 103 103 104 assert( (error == 0) , "cannot create <dev>\n" ); 104 // check success 105 assert( (error == 0) , "cannot create <dev>\n" ); 105 106 106 107 #if DEBUG_DEVFS_INIT … … 149 150 // create "internal" directory 150 151 snprintf( node_name , 16 , "internal_%x" , local_cxy ); 152 151 153 vfs_add_child_in_parent( local_cxy, 152 154 INODE_TYPE_DIR, … … 169 171 { 170 172 chdev_ptr = GET_PTR( chdev_xp ); 173 chdev_cxy = GET_CXY( chdev_xp ); 174 175 assert( (chdev_cxy == local_cxy ), 176 "illegal MMC chdev_xp in cluster %x\n", local_cxy ); 177 171 178 vfs_add_child_in_parent( local_cxy, 172 179 INODE_TYPE_DEV, … … 198 205 { 199 206 chdev_ptr = GET_PTR( chdev_xp ); 207 chdev_cxy = GET_CXY( chdev_xp ); 208 209 assert( (chdev_cxy == local_cxy ), 210 "illegal DMA[%d] chdev_xp in cluster %x\n", channel, local_cxy ); 211 200 212 vfs_add_child_in_parent( local_cxy, 201 213 INODE_TYPE_DEV, … … 226 238 chdev_cxy = GET_CXY( chdev_xp ); 227 239 chdev_ptr = GET_PTR( chdev_xp ); 240 228 241 if( chdev_cxy == local_cxy ) 229 242 { … … 256 269 chdev_cxy = GET_CXY( chdev_xp ); 257 270 chdev_ptr = GET_PTR( chdev_xp ); 271 258 272 if( chdev_cxy == local_cxy ) 259 273 { … … 288 302 chdev_cxy = GET_CXY( chdev_xp ); 289 303 chdev_ptr = GET_PTR( chdev_xp ); 304 290 305 if( chdev_cxy == local_cxy ) 291 306 { … … 321 336 chdev_cxy = GET_CXY( chdev_xp ); 322 337 chdev_ptr = GET_PTR( chdev_xp ); 338 323 339 if( chdev_cxy == local_cxy ) 324 340 { … … 354 370 chdev_cxy = GET_CXY( chdev_xp ); 355 371 chdev_ptr = GET_PTR( chdev_xp ); 372 356 373 if( chdev_cxy == local_cxy ) 357 374 { … … 387 404 chdev_cxy = GET_CXY( chdev_xp ); 388 405 chdev_ptr = GET_PTR( chdev_xp ); 406 389 407 if( chdev_cxy == local_cxy ) 390 408 { … … 419 
437 { 420 438 chdev_cxy = GET_CXY( chdev_xp ); 421 chdev_ptr = (chdev_t *)GET_PTR( chdev_xp ); 439 chdev_ptr = GET_PTR( chdev_xp ); 440 422 441 if( chdev_cxy == local_cxy ) 423 442 { … … 447 466 chdev_cxy = GET_CXY( chdev_xp ); 448 467 chdev_ptr = GET_PTR( chdev_xp ); 468 449 469 if( chdev_cxy == local_cxy ) 450 470 { -
trunk/kernel/fs/fatfs.c
r602 r610 1246 1246 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 1247 1247 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 1248 printk("\n[%s] 1248 printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n", 1249 1249 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle ); 1250 1250 #endif … … 1280 1280 uint32_t page_id = dentry_id >> 7; 1281 1281 uint32_t offset = (dentry_id & 0x7F)<<5; 1282 1283 #if DEBUG_FATFS_REMOVE_DENTRY & 1 1284 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 1285 printk("\n[%s] dentry_id %x / page_id %x / offset %x\n", 1286 __FUNCTION__, dentry_id, page_id, offset ); 1287 #endif 1282 1288 1283 1289 // get extended pointer on page descriptor from parent directory mapper … … 1345 1351 cycle = (uint32_t)hal_get_cycles(); 1346 1352 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 1347 printk("\n[%s] 1353 printk("\n[%s] thread[%x,%x] exit / parent %s / child %s / cycle %d\n", 1348 1354 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle ); 1349 1355 #endif … … 1358 1364 xptr_t child_inode_xp ) 1359 1365 { 1360 // Two embedded loops :1366 // Two embedded loops to scan the directory mapper: 1361 1367 // - scan the parent directory mapper pages 1362 1368 // - scan the directory entries in each 4 Kbytes page … … 1512 1518 // get child inode cluster and local pointer 1513 1519 cxy_t inode_cxy = GET_CXY( child_inode_xp ); 1514 vfs_inode_t * inode_ptr = (vfs_inode_t *)GET_PTR( child_inode_xp ); 1520 vfs_inode_t * inode_ptr = GET_PTR( child_inode_xp ); 1521 1522 // build extended pointer on parent dentried root 1523 xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1524 1525 // check child inode has at least one parent 1526 assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n"); 1515 1527 1516 1528 // get dentry pointers and cluster 1517 xptr_t dentry_xp = hal_remote_l64( XPTR( inode_cxy , &inode_ptr->parent_xp ) );1529 xptr_t dentry_xp = 
XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 1518 1530 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 1519 1531 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 1520 1532 1521 // dentry descriptor must bein same cluster as parent inode1533 // check dentry descriptor in same cluster as parent inode 1522 1534 assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" ); 1523 1535 -
trunk/kernel/fs/fatfs.h
r602 r610 311 311 * This function implements the generic vfs_fs_child_init() function for the FATFS. 312 312 ***************************************************************************************** 313 * It scan the mapper of an existing parent directory, identified by the <parent> 314 * argument, to find a directory entry identified by the <name> argument. 315 * It updates the existing remote child inode, identified by the <child_xp> argument, 313 * It tries to initialise a new child (new inode/dentry couple in Inode Tree), identified 314 * by the <child_inode_xp> argument, from the parent directory mapper, identified by the 315 * <parent_inode> argument. 316 * - It scan the parent mapper to find the <name> argument. 316 317 * - it set the "type", "size", and "extend" fields in inode descriptor. 317 318 * - it set the " extend" field in dentry descriptor. -
trunk/kernel/fs/vfs.c
r602 r610 141 141 } 142 142 143 ////////////////////////////////////////////////////// 144 error_t vfs_inode_create( xptr_t dentry_xp, 145 vfs_fs_type_t fs_type, 143 //////////////////////////////////////////////////// 144 error_t vfs_inode_create( vfs_fs_type_t fs_type, 146 145 vfs_inode_type_t inode_type, 147 146 uint32_t attr, … … 212 211 vfs_ctx_inum_release( ctx , inum ); 213 212 mapper_destroy( mapper ); 214 return ENOMEM;213 return -1; 215 214 } 216 215 217 216 // initialize inode descriptor 218 inode->gc = 0;219 217 inode->type = inode_type; 220 218 inode->inum = inum; … … 223 221 inode->uid = uid; 224 222 inode->gid = gid; 225 inode->refcount = 0;226 inode->parent_xp = dentry_xp;227 223 inode->ctx = ctx; 228 224 inode->mapper = mapper; 229 225 inode->extend = NULL; 226 inode->links = 0; 230 227 231 228 // initialise inode field in mapper … … 233 230 234 231 // initialise threads waiting queue 235 xlist_root_init( XPTR( local_cxy , &inode->wait_root ) );236 237 // initialize dentries hash table232 // xlist_root_init( XPTR( local_cxy , &inode->wait_root ) ); 233 234 // initialize chidren dentries xhtab 238 235 xhtab_init( &inode->children , XHTAB_DENTRY_TYPE ); 239 236 240 // initialize inode lock 241 remote_rwlock_init( XPTR( local_cxy , &inode->data_lock ), LOCK_VFS_INODE ); 242 243 // initialise lock protecting inode three traversal 244 remote_busylock_init( XPTR( local_cxy , &inode->main_lock ), LOCK_VFS_MAIN ); 237 // initialize parents dentries xlist 238 xlist_root_init( XPTR( local_cxy , &inode->parents ) ); 239 240 // initialize lock protecting size 241 remote_rwlock_init( XPTR( local_cxy , &inode->size_lock ), LOCK_VFS_SIZE ); 242 243 // initialise lock protecting inode tree traversal 244 remote_rwlock_init( XPTR( local_cxy , &inode->main_lock ), LOCK_VFS_MAIN ); 245 246 // return extended pointer on inode 247 *inode_xp = XPTR( local_cxy , inode ); 245 248 246 249 #if DEBUG_VFS_INODE_CREATE … … 251 254 #endif 252 255 253 // return extended pointer 
on inode254 *inode_xp = XPTR( local_cxy , inode );255 256 return 0; 256 257 … … 260 261 void vfs_inode_destroy( vfs_inode_t * inode ) 261 262 { 262 263 // check inode refcount264 assert( (inode->refcount == 0) , "inode refcount non zero\n" );265 266 263 // release memory allocated for mapper 267 264 mapper_destroy( inode->mapper ); … … 275 272 } // end vfs_inode_destroy() 276 273 277 ////////////////////////////////////////////278 void vfs_inode_remote_up( xptr_t inode_xp )279 {280 // get inode cluster and local pointer281 cxy_t inode_cxy = GET_CXY( inode_xp );282 vfs_inode_t * inode_ptr = GET_PTR( inode_xp );283 284 hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->refcount ) , 1 );285 }286 287 //////////////////////////////////////////////288 void vfs_inode_remote_down( xptr_t inode_xp )289 {290 // get inode cluster and local pointer291 cxy_t inode_cxy = GET_CXY( inode_xp );292 vfs_inode_t * inode_ptr = GET_PTR( inode_xp );293 294 hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->refcount ) , -1 );295 }296 297 274 ////////////////////////////////////////////// 298 275 uint32_t vfs_inode_get_size( xptr_t inode_xp ) … … 303 280 304 281 // get size 305 remote_rwlock_rd_acquire( XPTR( cxy , &ptr-> data_lock ) );282 remote_rwlock_rd_acquire( XPTR( cxy , &ptr->size_lock ) ); 306 283 uint32_t size = hal_remote_l32( XPTR( cxy , &ptr->size ) ); 307 remote_rwlock_rd_release( XPTR( cxy , &ptr-> data_lock ) );284 remote_rwlock_rd_release( XPTR( cxy , &ptr->size_lock ) ); 308 285 return size; 309 286 } … … 318 295 319 296 // set size 320 remote_rwlock_wr_release( XPTR( cxy , &ptr-> data_lock ) );297 remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) ); 321 298 hal_remote_s32( XPTR( cxy , &ptr->size ) , size ); 322 remote_rwlock_wr_release( XPTR( cxy , &ptr-> data_lock ) );299 remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) ); 323 300 } 324 301 … … 345 322 } 346 323 347 ///////////////////////////////////////// 348 void vfs_inode_get_name( xptr_t inode_xp, 
349 char * name ) 350 { 351 cxy_t inode_cxy; 352 vfs_inode_t * inode_ptr; 353 xptr_t dentry_xp; 354 cxy_t dentry_cxy; 355 vfs_dentry_t * dentry_ptr; 324 /////////////////////////////////////////// 325 void vfs_inode_get_name( xptr_t inode_xp, 326 char * name ) 327 { 328 cxy_t inode_cxy; // inode cluster identifier 329 vfs_inode_t * inode_ptr; // local pointer on inode 330 xptr_t parents_root_xp; // extended pointer on inode parents root 356 331 357 332 // get inode cluster and local pointer … … 359 334 inode_ptr = GET_PTR( inode_xp ); 360 335 361 // get parent dentry362 dentry_xp = hal_remote_l64( XPTR( inode_cxy , &inode_ptr->parent_xp ));363 364 // get local copy of name365 if( dentry_xp == XPTR_NULL ) // itis the VFS root336 // build extended pointer on parents dentries root 337 parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 338 339 // check VFS root 340 if( xlist_is_empty( parents_root_xp ) ) // inode is the VFS root 366 341 { 367 342 strcpy( name , "/" ); 368 343 } 369 else // not the VFS root 370 { 344 else // not the VFS root 345 { 346 xptr_t dentry_xp; 347 cxy_t dentry_cxy; 348 vfs_dentry_t * dentry_ptr; 349 350 // get first name in list of parents 351 dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 371 352 dentry_cxy = GET_CXY( dentry_xp ); 372 353 dentry_ptr = GET_PTR( dentry_xp ); 373 354 374 355 hal_remote_strcpy( XPTR( local_cxy , name ) , 375 XPTR( dentry_cxy , &dentry_ptr->name ) ); 376 } 356 XPTR( dentry_cxy , dentry_ptr->name ) ); 357 } 358 377 359 } // end vfs_inode_get_name() 378 360 … … 433 415 error_t vfs_dentry_create( vfs_fs_type_t fs_type, 434 416 char * name, 435 vfs_inode_t * parent,436 417 xptr_t * dentry_xp ) 437 418 { … … 439 420 vfs_dentry_t * dentry; // dentry descriptor (to be allocated) 440 421 kmem_req_t req; // request to kernel memory allocator 441 error_t error;442 422 443 423 #if DEBUG_VFS_DENTRY_CREATE … … 456 436 { 457 437 ctx = NULL; 458 return EINVAL;438 return -1; 459 439 } 460 440 … … 470 
450 dentry = (vfs_dentry_t *)kmem_alloc( &req ); 471 451 472 if( dentry == NULL ) return ENOMEM; 452 if( dentry == NULL ) 453 { 454 printk("\n[ERROR] in %s : cannot allocate dentry descriptor\n", 455 __FUNCTION__ ); 456 return -1; 457 } 473 458 474 459 // initialize dentry descriptor 475 476 460 dentry->ctx = ctx; 477 461 dentry->length = length; 478 dentry->parent = parent;479 462 dentry->extend = NULL; 480 463 strcpy( dentry->name , name ); 481 482 #if( DEBUG_VFS_DENTRY_CREATE & 1 )483 cycle = (uint32_t)hal_get_cycles();484 if( DEBUG_VFS_DENTRY_CREATE < cycle )485 printk("\n[%s] thread[%x,%x] / dentry <%s> initialised / cycle %d\n",486 __FUNCTION__, this->process->pid, this->trdid, dentry->name, cycle );487 #endif488 489 // register dentry in hash table rooted in parent inode490 error = xhtab_insert( XPTR( local_cxy , &parent->children ),491 name,492 XPTR( local_cxy , &dentry->list ) );493 494 if( error ) return EINVAL;495 496 #if( DEBUG_VFS_DENTRY_CREATE & 1 )497 cycle = (uint32_t)hal_get_cycles();498 if( DEBUG_VFS_DENTRY_CREATE < cycle )499 printk("\n[%s] thread[%x,%x] / dentry <%s> registered / cycle %d\n",500 __FUNCTION__, this->process->pid, this->trdid, dentry->name, cycle );501 #endif502 464 503 465 // return extended pointer on dentry … … 518 480 void vfs_dentry_destroy( vfs_dentry_t * dentry ) 519 481 { 520 521 // check dentry refcount522 assert( (dentry->refcount == 0) , "dentry refcount non zero\n" );523 524 // get pointer on parent inode525 vfs_inode_t * parent = dentry->parent;526 527 // remove this dentry from parent inode htab528 xhtab_remove( XPTR( local_cxy , &parent->children ),529 dentry->name,530 XPTR( local_cxy , &dentry->list ) );531 532 482 // release memory allocated to dentry 533 483 kmem_req_t req; … … 537 487 538 488 } // end vfs_dentry_destroy() 539 540 //////////////////////////////////////////////541 void vfs_dentry_remote_up( xptr_t dentry_xp )542 {543 // get dentry cluster and local pointer544 cxy_t dentry_cxy = GET_CXY( dentry_xp 
);545 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );546 547 hal_remote_atomic_add( XPTR( dentry_cxy , &dentry_ptr->refcount ) , 1 );548 }549 550 ////////////////////////////////////////////////551 void vfs_dentry_remote_down( xptr_t dentry_xp )552 {553 // get dentry cluster and local pointer554 cxy_t dentry_cxy = GET_CXY( dentry_xp );555 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );556 557 hal_remote_atomic_add( XPTR( dentry_cxy , &dentry_ptr->refcount ) , -1 );558 }559 560 489 561 490 … … 616 545 void vfs_file_destroy( vfs_file_t * file ) 617 546 { 618 if( file->refcount ) 619 { 620 assert( false , "refcount non zero\n" ); 621 } 547 548 // check refcount 549 assert( (file->refcount == 0) , "refcount non zero\n" ); 622 550 623 551 kmem_req_t req; … … 664 592 665 593 ////////////////////////////////////// 666 error_t vfs_open( process_t * process,594 error_t vfs_open( xptr_t root_xp, 667 595 char * path, 596 xptr_t process_xp, 668 597 uint32_t flags, 669 598 uint32_t mode, … … 671 600 uint32_t * new_file_id ) 672 601 { 673 error_t error; 674 xptr_t inode_xp; // extended pointer on target inode 675 cxy_t inode_cxy; // inode cluster identifier 676 vfs_inode_t * inode_ptr; // inode local pointer 677 uint32_t file_attr; // file descriptor attributes 678 uint32_t lookup_mode; // lookup working mode 679 xptr_t file_xp; // extended pointer on created file descriptor 680 uint32_t file_id; // created file descriptor index in reference fd_array 681 602 error_t error; 603 xptr_t inode_xp; // extended pointer on target inode 604 cxy_t inode_cxy; // inode cluster identifier 605 vfs_inode_t * inode_ptr; // inode local pointer 606 uint32_t file_attr; // file descriptor attributes 607 uint32_t lookup_mode; // lookup working mode 608 xptr_t file_xp; // extended pointer on created file descriptor 609 uint32_t file_id; // created file descriptor index in reference fd_array 610 xptr_t vfs_root_xp; // extended pointer on VFS root inode 611 vfs_inode_t * vfs_root_ptr; // local 
pointer on VFS root inode 612 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 613 xptr_t lock_xp; // extended pointer on Inode Tree lock 682 614 683 615 if( mode != 0 ) … … 687 619 } 688 620 621 thread_t * this = CURRENT_THREAD; 622 process_t * process = this->process; 623 689 624 #if DEBUG_VFS_OPEN 690 thread_t * this = CURRENT_THREAD;691 625 uint32_t cycle = (uint32_t)hal_get_cycles(); 692 626 if( DEBUG_VFS_OPEN < cycle ) 693 printk("\n[%s] thread[%x,%x] enter for <%s>/ cycle %d\n",694 __FUNCTION__, this->process->pid, this->trdid, path, cycle );627 printk("\n[%s] thread[%x,%x] enter for <%s> / root_inode (%x,%x) / cycle %d\n", 628 __FUNCTION__, process->pid, this->trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle ); 695 629 #endif 696 630 … … 709 643 if( (flags & O_CLOEXEC) ) file_attr |= FD_ATTR_CLOSE_EXEC; 710 644 645 // build extended pointer on lock protecting Inode Tree 646 vfs_root_xp = process->vfs_root_xp; 647 vfs_root_ptr = GET_PTR( vfs_root_xp ); 648 vfs_root_cxy = GET_CXY( vfs_root_xp ); 649 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 650 651 // take lock protecting Inode Tree in read mode 652 remote_rwlock_rd_acquire( lock_xp ); 653 711 654 // get extended pointer on target inode 712 error = vfs_lookup( process->vfs_cwd_xp , path , lookup_mode , &inode_xp ); 713 714 if( error ) return error; 655 error = vfs_lookup( root_xp, 656 path, 657 lookup_mode, 658 &inode_xp, 659 NULL ); 660 661 // release lock protecting Inode Tree 662 remote_rwlock_rd_release( lock_xp ); 663 664 if( error ) 665 { 666 printk("\n[ERROR] in %s : cannot get inode <%s>\n", 667 __FUNCTION__ , path ); 668 return -1; 669 } 715 670 716 671 // get target inode cluster and local pointer … … 718 673 inode_ptr = GET_PTR( inode_xp ); 719 674 675 #if (DEBUG_VFS_OPEN & 1) 676 cycle = (uint32_t)hal_get_cycles(); 677 if( DEBUG_VFS_OPEN < cycle ) 678 printk("\n[%s] thread[%x,%x] found inode(%x,%x) for <%s>\n", 679 __FUNCTION__, process->pid, this->trdid, inode_cxy, 
inode_ptr, path ); 680 #endif 681 720 682 // create a new file descriptor in cluster containing inode 721 683 if( inode_cxy == local_cxy ) // target cluster is local … … 730 692 if( error ) return error; 731 693 694 #if (DEBUG_VFS_OPEN & 1) 695 cycle = (uint32_t)hal_get_cycles(); 696 if( DEBUG_VFS_OPEN < cycle ) 697 printk("\n[%s] thread[%x,%x] created file descriptor (%x,%x) for <%s>\n", 698 __FUNCTION__, process->pid, this->trdid, GET_CXY(file_xp), GET_PTR(file_xp), path ); 699 #endif 700 732 701 // allocate and register a new file descriptor index in reference process 733 error = process_fd_register( process , file_xp , &file_id );702 error = process_fd_register( process_xp , file_xp , &file_id ); 734 703 735 704 if( error ) return error; … … 738 707 cycle = (uint32_t)hal_get_cycles(); 739 708 if( DEBUG_VFS_OPEN < cycle ) 740 printk("\n[%s] 741 __FUNCTION__, this->process->pid, this->trdid, path, file_id, GET_CXY( file_xp ), cycle );709 printk("\n[%s] thread[%x,%x] exit for <%s> / fdid %d / cluster %x / cycle %d\n", 710 __FUNCTION__, process->pid, this->trdid, path, file_id, GET_CXY( file_xp ), cycle ); 742 711 #endif 743 712 … … 780 749 781 750 // move data between mapper and buffer 782 if( file_cxy == local_cxy ) 783 { 784 error = mapper_move_user( mapper, 785 to_buffer, 786 file_offset, 787 buffer, 788 size ); 789 } 790 else 791 { 792 rpc_mapper_move_user_client( file_cxy, 793 mapper, 794 to_buffer, 795 file_offset, 796 buffer, 797 size, 798 &error ); 799 } 751 error = mapper_move_user( XPTR( file_cxy , mapper ), 752 to_buffer, 753 file_offset, 754 buffer, 755 size ); 800 756 801 757 // update file offset in file descriptor … … 834 790 // get inode type from remote file descriptor 835 791 inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 836 792 837 793 // action depends on inode type 838 794 if( inode_type == INODE_TYPE_FILE ) … … 1044 1000 1045 1001 //////////////////////////////////// 1046 error_t vfs_unlink( xptr_t cwd_xp, 1002 error_t 
vfs_mkdir( xptr_t root_xp, 1003 char * path, 1004 uint32_t rights ) 1005 { 1006 error_t error; 1007 xptr_t vfs_root_xp; // extended pointer on VFS root inode 1008 vfs_inode_t * vfs_root_ptr; // local pointer on VFS root inode 1009 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1010 xptr_t lock_xp; // extended pointer on lock protecting Inode Tree 1011 xptr_t inode_xp; // extended pointer on target inode 1012 vfs_inode_t * inode_ptr; // local pointer on target inode 1013 cxy_t inode_cxy; // target inode cluster identifier 1014 xptr_t dentry_xp; // extended pointer on new dentry 1015 vfs_dentry_t * dentry_ptr; // target dentry local pointer 1016 xptr_t parent_xp; // extended pointer on new parent inode 1017 vfs_inode_t * parent_ptr; // local pointer on new parent inode 1018 cxy_t parent_cxy; // new parent inode cluster identifier 1019 vfs_ctx_t * parent_ctx_ptr; // local pointer on target inode context 1020 uint32_t parent_fs_type; // target inode file system type 1021 1022 xptr_t parents_root_xp; // extended pointer on parents field in inode (root) 1023 xptr_t parents_entry_xp; // extended pointer on parents field in dentry 1024 xptr_t children_xhtab_xp; // extended pointer on children field in inode (root) 1025 xptr_t children_entry_xp; // extended pointer on children field in dentry 1026 1027 char last_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1028 1029 thread_t * this = CURRENT_THREAD; 1030 process_t * process = this->process; 1031 1032 #if DEBUG_VFS_MKDIR 1033 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1034 vfs_inode_get_name( root_xp , root_name ); 1035 uint32_t cycle = (uint32_t)hal_get_cycles(); 1036 if( DEBUG_VFS_MKDIR < cycle ) 1037 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n", 1038 __FUNCTION__, process->pid, this->trdid, root_name, path, cycle ); 1039 #endif 1040 1041 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 1042 vfs_root_xp = process->vfs_root_xp; 1043 vfs_root_ptr = GET_PTR( vfs_root_xp 
); 1044 vfs_root_cxy = GET_CXY( vfs_root_xp ); 1045 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 1046 1047 // take the lock protecting Inode Tree in write mode 1048 remote_rwlock_wr_acquire( lock_xp ); 1049 1050 // 1. get pointers on parent inode 1051 error = vfs_lookup( root_xp, 1052 path, 1053 VFS_LOOKUP_DIR | VFS_LOOKUP_PARENT, 1054 &parent_xp, 1055 last_name ); 1056 if( error ) 1057 { 1058 remote_rwlock_wr_release( lock_xp ); 1059 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n", 1060 __FUNCTION__, path ); 1061 return -1; 1062 } 1063 1064 // get parent inode cluster and local pointer 1065 parent_cxy = GET_CXY( parent_xp ); 1066 parent_ptr = GET_PTR( parent_xp ); 1067 1068 #if( DEBUG_VFS_MKDIR & 1 ) 1069 if( DEBUG_VFS_MKDIR < cycle ) 1070 printk("\n[%s] thread[%x,%x] get parent inode (%x,%x) for <%s>\n", 1071 __FUNCTION__, process->pid, this->trdid, parent_cxy, parent_ptr, path ); 1072 #endif 1073 1074 // get parent inode context, and FS type 1075 parent_ctx_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->ctx ) ); 1076 parent_fs_type = hal_remote_l32( XPTR( parent_cxy , &parent_ctx_ptr->type ) ); 1077 1078 // 2. 
create one new dentry in parent cluster 1079 if( parent_cxy == local_cxy ) 1080 { 1081 error = vfs_dentry_create( parent_fs_type, 1082 last_name, 1083 &dentry_xp ); 1084 } 1085 else 1086 { 1087 rpc_vfs_dentry_create_client( parent_cxy, 1088 parent_fs_type, 1089 last_name, 1090 &dentry_xp, 1091 &error ); 1092 } 1093 1094 if( error ) 1095 { 1096 remote_rwlock_wr_release( lock_xp ); 1097 printk("\n[ERROR] in %s : cannot create new dentry in cluster %x for <%s>\n", 1098 __FUNCTION__, parent_cxy, path ); 1099 return -1; 1100 } 1101 1102 // get local pointer on dentry 1103 dentry_ptr = GET_PTR( dentry_xp ); 1104 1105 #if( DEBUG_VFS_MKDIR & 1 ) 1106 if( DEBUG_VFS_MKDIR < cycle ) 1107 printk("\n[%s] thread[%x,%x] created new dentry (%x,%x) for <%s>\n", 1108 __FUNCTION__, process->pid, this->trdid, parent_cxy, dentry_ptr, path ); 1109 #endif 1110 1111 // 3. create new directory inode in child cluster 1112 // TODO : define attr / uid / gid 1113 uint32_t attr = 0; 1114 uint32_t uid = 0; 1115 uint32_t gid = 0; 1116 1117 // select a target cluster for new inode 1118 inode_cxy = cluster_random_select(); 1119 1120 if( inode_cxy == local_cxy ) // child cluster is local 1121 { 1122 error = vfs_inode_create( parent_fs_type, 1123 INODE_TYPE_DIR, 1124 attr, 1125 rights, 1126 uid, 1127 gid, 1128 &inode_xp ); 1129 } 1130 else // child cluster is remote 1131 { 1132 rpc_vfs_inode_create_client( inode_cxy, 1133 parent_fs_type, 1134 INODE_TYPE_DIR, 1135 attr, 1136 rights, 1137 uid, 1138 gid, 1139 &inode_xp, 1140 &error ); 1141 } 1142 1143 if( error ) 1144 { 1145 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n", 1146 __FUNCTION__ , inode_cxy , path ); 1147 1148 if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr ); 1149 else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr ); 1150 return -1; 1151 } 1152 1153 // get new inode local pointer 1154 inode_ptr = GET_PTR( inode_xp ); 1155 1156 #if(DEBUG_VFS_MKDIR & 1) 1157 if( DEBUG_VFS_MKDIR < cycle ) 1158 
printk("\n[%s] thread[%x,%x] created new inode (%x,%x) for <%s>\n", 1159 __FUNCTION__ , process->pid, this->trdid, inode_cxy, inode_ptr, path ); 1160 #endif 1161 1162 // 4. register dentry in new inode list of parents 1163 parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1164 parents_entry_xp = XPTR( parent_cxy , &dentry_ptr->parents ); 1165 xlist_add_first( parents_root_xp , parents_entry_xp ); 1166 hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->links ) , 1 ); 1167 1168 // 5. register dentry in parent inode 1169 children_xhtab_xp = XPTR( parent_cxy , &parent_ptr->children ); 1170 children_entry_xp = XPTR( parent_cxy , &dentry_ptr->children ); 1171 xhtab_insert( children_xhtab_xp , last_name , children_entry_xp ); 1172 1173 // 6. update "parent" and "child_xp" fields in dentry 1174 hal_remote_s64( XPTR( parent_cxy , &dentry_ptr->child_xp ) , inode_xp ); 1175 hal_remote_spt( XPTR( parent_cxy , &dentry_ptr->parent ) , parent_ptr ); 1176 1177 #if(DEBUG_VFS_MKDIR & 1) 1178 if( DEBUG_VFS_MKDIR < cycle ) 1179 printk("\n[%s] thread[%x,%x] updated Inode Tree for <%s>\n", 1180 __FUNCTION__, process->pid, this->trdid, path ); 1181 #endif 1182 1183 // release the lock protecting Inode Tree 1184 remote_rwlock_wr_release( lock_xp ); 1185 1186 // 5. 
update parent directory mapper 1187 // and synchronize the parent directory on IOC device 1188 if (parent_cxy == local_cxy) 1189 { 1190 error = vfs_fs_add_dentry( parent_ptr, 1191 dentry_ptr ); 1192 } 1193 else 1194 { 1195 rpc_vfs_fs_add_dentry_client( parent_cxy, 1196 parent_ptr, 1197 dentry_ptr, 1198 &error ); 1199 } 1200 1201 if( error ) 1202 { 1203 printk("\n[ERROR] in %s : cannot update parent directory for <%s>\n", 1204 __FUNCTION__, path ); 1205 return -1; 1206 } 1207 1208 #if(DEBUG_VFS_MKDIR & 1) 1209 if( DEBUG_VFS_MKDIR < cycle ) 1210 printk("\n[%s] thread[%x,%x] updated parent dir (mapper and IOC) for <%s>\n", 1211 __FUNCTION__, process->pid, this->trdid, path ); 1212 #endif 1213 1214 return 0; 1215 1216 } // end vfs_mkdir() 1217 1218 /////////////////////////////////////// 1219 error_t vfs_link( xptr_t old_root_xp, 1220 char * old_path, 1221 xptr_t new_root_xp, 1222 char * new_path ) 1223 { 1224 error_t error; 1225 xptr_t vfs_root_xp; // extended pointer on VFS root inode 1226 vfs_inode_t * vfs_root_ptr; // local pointer on VFS root inode 1227 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1228 xptr_t lock_xp; // extended pointer on lock protecting Inode Tree 1229 xptr_t inode_xp; // extended pointer on target inode 1230 vfs_inode_t * inode_ptr; // local pointer on target inode 1231 cxy_t inode_cxy; // target inode cluster identifier 1232 uint32_t inode_type; // target inode type 1233 vfs_ctx_t * inode_ctx_ptr; // local pointer on target inode context 1234 uint32_t inode_fs_type; // target inode file system type 1235 xptr_t dentry_xp; // extended pointer on new dentry 1236 vfs_dentry_t * dentry_ptr; // target dentry local pointer 1237 xptr_t new_parent_xp; // extended pointer on new parent inode 1238 vfs_inode_t * new_parent_ptr; // local pointer on new parent inode 1239 cxy_t new_parent_cxy; // new parent inode cluster identifier 1240 1241 xptr_t parents_root_xp; // extended pointer on parents field in inode (root) 1242 xptr_t 
parents_entry_xp; // extended pointer on parents field in dentry 1243 xptr_t children_xhtab_xp; // extended pointer on children field in inode (root) 1244 xptr_t children_entry_xp; // extended pointer on children field in dentry 1245 1246 char new_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1247 1248 thread_t * this = CURRENT_THREAD; 1249 process_t * process = this->process; 1250 1251 #if DEBUG_VFS_LINK 1252 char old_root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1253 char new_root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1254 vfs_inode_get_name( old_root_xp , old_root_name ); 1255 vfs_inode_get_name( new_root_xp , new_root_name ); 1256 uint32_t cycle = (uint32_t)hal_get_cycles(); 1257 if( DEBUG_VFS_LINK < cycle ) 1258 printk("\n[%s] thread[%x,%x] enter / old_root <%s> / old_path <%s> / " 1259 "new_root <%s> / new_path <%s> / cycle %d\n", 1260 __FUNCTION__, process->pid, this->trdid, 1261 old_root_name, old_path, new_root_name, new_path, cycle ); 1262 #endif 1263 1264 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 1265 vfs_root_xp = process->vfs_root_xp; 1266 vfs_root_ptr = GET_PTR( vfs_root_xp ); 1267 vfs_root_cxy = GET_CXY( vfs_root_xp ); 1268 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 1269 1270 // take the lock protecting Inode Tree in write mode 1271 remote_rwlock_wr_acquire( lock_xp ); 1272 1273 // get extended pointer on target inode 1274 error = vfs_lookup( old_root_xp, 1275 old_path, 1276 0, 1277 &inode_xp, 1278 NULL ); 1279 if( error ) 1280 { 1281 remote_rwlock_wr_release( lock_xp ); 1282 printk("\n[ERROR] in %s : cannot get target inode for <%s>\n", 1283 __FUNCTION__, old_path ); 1284 return -1; 1285 } 1286 1287 #if( DEBUG_VFS_LINK & 1 ) 1288 if( DEBUG_VFS_LINK < cycle ) 1289 printk("\n[%s] thread[%x,%x] get child inode (%x,%x) for <%s>\n", 1290 __FUNCTION__, process->pid, this->trdid, 1291 GET_CXY(inode_xp), GET_PTR(inode_xp), old_path, cycle ); 1292 #endif 1293 1294 // get extended pointer on parent inode in new path 1295 error = 
vfs_lookup( new_root_xp, 1296 new_path, 1297 VFS_LOOKUP_PARENT, 1298 &new_parent_xp, 1299 new_name ); 1300 if( error ) 1301 { 1302 remote_rwlock_wr_release( lock_xp ); 1303 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n", 1304 __FUNCTION__, new_path ); 1305 return -1; 1306 } 1307 1308 #if( DEBUG_VFS_LINK & 1 ) 1309 if( DEBUG_VFS_LINK < cycle ) 1310 printk("\n[%s] thread[%x,%x] get parent inode (%x,%x) for <%s>\n", 1311 __FUNCTION__, process->pid, this->trdid, 1312 GET_CXY(new_parent_xp), GET_PTR(new_parent_xp), new_path ); 1313 #endif 1314 1315 // get target inode cluster and local pointer 1316 inode_cxy = GET_CXY( inode_xp ); 1317 inode_ptr = GET_PTR( inode_xp ); 1318 1319 // get target inode type, context, and FS type 1320 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 1321 inode_ctx_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->ctx ) ); 1322 inode_fs_type = hal_remote_l32( XPTR( inode_cxy , &inode_ctx_ptr->type ) ); 1323 1324 // get new parent inode cluster an local pointer 1325 new_parent_ptr = GET_PTR( new_parent_xp ); 1326 new_parent_cxy = GET_CXY( new_parent_xp ); 1327 1328 /////////////////////////////////////////////////////////////////////// 1329 if( (inode_type == INODE_TYPE_FILE) || (inode_type == INODE_TYPE_DIR) ) 1330 { 1331 // 1. create one new dentry 1332 if( new_parent_cxy == local_cxy ) 1333 { 1334 error = vfs_dentry_create( inode_fs_type, 1335 new_name, 1336 &dentry_xp ); 1337 } 1338 else 1339 { 1340 rpc_vfs_dentry_create_client( new_parent_cxy, 1341 inode_fs_type, 1342 new_name, 1343 &dentry_xp, 1344 &error ); 1345 } 1346 1347 if( error ) 1348 { 1349 remote_rwlock_wr_release( lock_xp ); 1350 printk("\n[ERROR] in %s : cannot create new dentry for <%s>\n", 1351 __FUNCTION__, new_path ); 1352 return -1; 1353 } 1354 1355 // get local pointer on dentry 1356 dentry_ptr = GET_PTR( dentry_xp ); 1357 1358 // 2. 
register dentry in target inode 1359 parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1360 parents_entry_xp = XPTR( new_parent_cxy , &dentry_ptr->parents ); 1361 xlist_add_first( parents_root_xp , parents_entry_xp ); 1362 hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->links ) , 1 ); 1363 1364 // 3. register dentry in parent inode 1365 children_xhtab_xp = XPTR( new_parent_cxy , &new_parent_ptr->children ); 1366 children_entry_xp = XPTR( new_parent_cxy , &dentry_ptr->children ); 1367 xhtab_insert( children_xhtab_xp , new_name , children_entry_xp ); 1368 1369 // 4. update "parent" and "child_xp" fields in dentry 1370 hal_remote_s64( XPTR( new_parent_cxy , &dentry_ptr->child_xp ) , inode_xp ); 1371 hal_remote_spt( XPTR( new_parent_cxy , &dentry_ptr->parent ) , new_parent_ptr ); 1372 1373 #if(DEBUG_VFS_LINK & 1) 1374 if( DEBUG_VFS_LINK < cycle ) 1375 printk("\n[%s] thread[%x,%x] updated Inode Tree / old <%s> / new <%s>\n", 1376 __FUNCTION__, process->pid, this->trdid, old_path, new_path ); 1377 vfs_display( new_parent_xp ); 1378 #endif 1379 1380 // release the lock protecting Inode Tree 1381 remote_rwlock_wr_release( lock_xp ); 1382 1383 // 5. 
update new parent directory mapper in Inode Tree 1384 // and synchronize the parent directory on IOC device 1385 if (new_parent_cxy == local_cxy) 1386 { 1387 error = vfs_fs_add_dentry( new_parent_ptr, 1388 dentry_ptr ); 1389 } 1390 else 1391 { 1392 rpc_vfs_fs_add_dentry_client( new_parent_cxy, 1393 new_parent_ptr, 1394 dentry_ptr, 1395 &error ); 1396 } 1397 if( error ) 1398 { 1399 printk("\n[ERROR] in %s : cannot update new parent directory for <%s>\n", 1400 __FUNCTION__, new_path ); 1401 return -1; 1402 } 1403 1404 #if(DEBUG_VFS_LINK & 1) 1405 if( DEBUG_VFS_LINK < cycle ) 1406 printk("\n[%s] thread[%x,%x] updated new parent dir (mapper and IOC) / old <%s> / new <%s>\n", 1407 __FUNCTION__, process->pid, this->trdid, old_path, new_path ); 1408 #endif 1409 return 0; 1410 } 1411 else 1412 { 1413 // release the lock protecting Inode Tree 1414 remote_rwlock_wr_release( lock_xp ); 1415 1416 printk("\n[ERROR] in %s : unsupported inode type %s\n", 1417 __FUNCTION__ , vfs_inode_type_str( inode_type ) ); 1418 return -1; 1419 } 1420 1421 } // end vfs_link() 1422 1423 ///////////////////////////////////// 1424 error_t vfs_unlink( xptr_t root_xp, 1047 1425 char * path ) 1048 1426 { 1049 1427 error_t error; 1428 xptr_t vfs_root_xp; // extended pointer on VFS root inode 1429 vfs_inode_t * vfs_root_ptr; // local_pointer on VFS root inode 1430 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1431 xptr_t lock_xp; // extended pointer on lock protecting Inode Tree 1432 xptr_t parent_xp; // extended pointer on target inode 1433 cxy_t parent_cxy; // target inode cluster identifier 1434 vfs_inode_t * parent_ptr; // target inode local pointer 1050 1435 xptr_t inode_xp; // extended pointer on target inode 1051 1436 cxy_t inode_cxy; // target inode cluster identifier 1052 1437 vfs_inode_t * inode_ptr; // target inode local pointer 1053 uint32_t inode_refcount; // target inode refcount 1054 vfs_inode_type_t type; // target inode type 1055 mapper_t * mapper; // pointer on target inode 
mapper 1056 xptr_t dentry_xp; // extended pointer on target dentry 1057 cxy_t dentry_cxy; // target dentry cluster identifier 1058 vfs_dentry_t * dentry_ptr; // target dentry local pointer 1059 uint32_t dentry_refcount; // target dentry refcount 1060 vfs_inode_t * dentry_parent_ptr; // parent inode local pointer 1438 uint32_t inode_links; // target inode links count 1439 vfs_inode_type_t inode_type; // target inode type 1440 uint32_t inode_children; // target inode number of children 1441 xptr_t dentry_xp; // extended pointer on dentry to unlink 1442 vfs_dentry_t * dentry_ptr; // local pointer on dentry to unlink 1443 1444 char name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of link to remove 1445 1446 thread_t * this = CURRENT_THREAD; 1447 process_t * process = this->process; 1061 1448 1062 1449 #if DEBUG_VFS_UNLINK 1063 thread_t * this = CURRENT_THREAD;1064 1450 uint32_t cycle = (uint32_t)hal_get_cycles(); 1451 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1452 vfs_inode_get_name( root_xp , root_name ); 1065 1453 if( DEBUG_VFS_UNLINK < cycle ) 1066 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", 1067 __FUNCTION__, this->process->pid, this->trdid, path, cycle ); 1068 #endif 1069 1070 // get extended pointer on target inode 1071 error = vfs_lookup( cwd_xp , path , 0 , &inode_xp ); 1072 1073 if( error ) return error; 1074 1075 // get inode cluster and local pointer 1454 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n", 1455 __FUNCTION__, process->pid, this->trdid, root_name, path, cycle ); 1456 #endif 1457 1458 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 1459 vfs_root_xp = process->vfs_root_xp; 1460 vfs_root_ptr = GET_PTR( root_xp ); 1461 vfs_root_cxy = GET_CXY( root_xp ); 1462 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 1463 1464 // take the lock protecting Inode Tree 1465 remote_rwlock_wr_acquire( lock_xp ); 1466 1467 // get extended pointer on parent inode 1468 error = vfs_lookup( root_xp, 
1469 path, 1470 VFS_LOOKUP_PARENT, 1471 &parent_xp, 1472 name ); 1473 if( error ) 1474 { 1475 remote_rwlock_wr_release( lock_xp ); 1476 printk("\n[ERROR] in %s : cannot get parent inode for <%s> in <%s>\n", 1477 __FUNCTION__, name, path ); 1478 return -1; 1479 } 1480 1481 // get parent inode cluster and local pointer 1482 parent_cxy = GET_CXY( parent_xp ); 1483 parent_ptr = GET_PTR( parent_xp ); 1484 1485 #if( DEBUG_VFS_UNLINK & 1 ) 1486 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1487 vfs_inode_get_name( parent_xp , parent_name ); 1488 if( DEBUG_VFS_UNLINK < cycle ) 1489 printk("\n[%s] thread[%x,%x] parent inode <%s> is (%x,%x)\n", 1490 __FUNCTION__, process->pid, this->trdid, parent_name, parent_cxy, parent_ptr ); 1491 #endif 1492 1493 // build extended pointer on parent inode "children" xhtab 1494 xptr_t children_xp = XPTR( parent_cxy , &parent_ptr->children ); 1495 1496 // get extended pointer on dentry to unlink 1497 dentry_xp = xhtab_lookup( children_xp , name ); 1498 1499 if( dentry_xp == XPTR_NULL ) 1500 { 1501 remote_rwlock_wr_release( lock_xp ); 1502 printk("\n[ERROR] in %s : cannot get target dentry <%s> in <%s>\n", 1503 __FUNCTION__, name, path ); 1504 return -1; 1505 } 1506 1507 // get local pointer on dentry to unlink 1508 dentry_ptr = GET_PTR( dentry_xp ); 1509 1510 #if( DEBUG_VFS_UNLINK & 1 ) 1511 if( DEBUG_VFS_UNLINK < cycle ) 1512 printk("\n[%s] thread[%x,%x] dentry <%s> to unlink is (%x,%x)\n", 1513 __FUNCTION__, process->pid, this->trdid, name, parent_cxy, dentry_ptr ); 1514 #endif 1515 1516 // get pointer on target inode 1517 inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 1076 1518 inode_cxy = GET_CXY( inode_xp ); 1077 1519 inode_ptr = GET_PTR( inode_xp ); 1078 1079 // get inode type, refcount, mapper, dentry_xp 1080 type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 1081 inode_refcount = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->refcount ) ); 1082 mapper = hal_remote_lpt( XPTR( inode_cxy , 
&inode_ptr->mapper ) ); 1083 dentry_xp = hal_remote_l64( XPTR( inode_cxy , &inode_ptr->parent_xp ) ); 1084 1085 // get dentry cluster, local pointer, refcount, and pointers on parent inode 1086 dentry_ptr = GET_PTR( dentry_xp ); 1087 dentry_cxy = GET_CXY( dentry_xp ); 1088 dentry_refcount = hal_remote_l32( XPTR( dentry_cxy , &dentry_ptr->refcount ) ); 1089 dentry_parent_ptr = hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->parent ) ); 1090 1091 // check inode & dentry refcount 1092 assert( (inode_refcount == 1), "illegal inode refcount for <%s>\n", path ); 1093 assert( (dentry_refcount == 1), "illegal dentry refcount for <%s>\n", path ); 1094 1095 ///////////////////////////// 1096 if( type == INODE_TYPE_FILE ) 1097 { 1098 // 1. release clusters allocated to file in the FAT mapper 1099 // synchronize the FAT on IOC device 1100 error = vfs_fs_release_inode( inode_xp ); 1101 if( error ) 1520 1521 #if( DEBUG_VFS_UNLINK & 1 ) 1522 char inode_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1523 vfs_inode_get_name( inode_xp , inode_name ); 1524 if( DEBUG_VFS_UNLINK < cycle ) 1525 printk("\n[%s] thread[%x,%x] target inode <%s> is (%x,%x) / cycle %d\n", 1526 __FUNCTION__, process->pid, this->trdid, inode_name, inode_cxy, inode_ptr, cycle ); 1527 #endif 1528 1529 // get target inode "type" and "links" 1530 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 1531 inode_links = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->links ) ); 1532 1533 // check target inode links counter 1534 assert( (inode_links >= 1), "illegal inode links count %d for <%s>\n", inode_links, path ); 1535 1536 /////////////////////////////////////////////////////////////////////// 1537 if( (inode_type == INODE_TYPE_FILE) || (inode_type == INODE_TYPE_DIR) ) 1538 { 1539 // 1. Release clusters allocated to target inode 1540 // and synchronize the FAT on IOC device if last link. 
1541 if( inode_links == 1 ) 1102 1542 { 1103 printk("\n[ERROR] in %s : cannot update FAT mapper <%s>\n", path ); 1104 return -1; 1105 } 1543 // build extended pointer on target inode "children" number 1544 xptr_t inode_children_xp = XPTR( inode_cxy , &inode_ptr->children.items ); 1545 1546 // get target inode number of children 1547 inode_children = hal_remote_l32( inode_children_xp ); 1548 1549 // check no children 1550 if( inode_children != 0 ) 1551 { 1552 remote_rwlock_wr_release( lock_xp ); 1553 printk("\n[ERROR] in %s : cannot remove <%s> inode that has children\n", 1554 __FUNCTION__, path ); 1555 return -1; 1556 } 1557 1558 // release clusters on IOC device 1559 error = vfs_fs_release_inode( inode_xp ); 1560 1561 if( error ) 1562 { 1563 remote_rwlock_wr_release( lock_xp ); 1564 printk("\n[ERROR] in %s : cannot update FAT mapper to remove <%s> inode\n", 1565 __FUNCTION__ , path ); 1566 return -1; 1567 } 1106 1568 1107 1569 #if(DEBUG_VFS_UNLINK & 1) 1108 1570 if( DEBUG_VFS_UNLINK < cycle ) 1109 1571 printk("\n[%s] thread[%x,%x] removed <%s> inode from FAT (mapper and IOC device)\n", 1110 __FUNCTION__, this->process->pid, this->trdid, path ); 1111 #endif 1112 1113 // 2. update parent directory in Inode Tree 1114 // synchronize the parent directory on IOC device 1115 if (dentry_cxy == local_cxy) // dentry is local 1572 __FUNCTION__, process->pid, this->trdid, path ); 1573 #endif 1574 } 1575 1576 // 2. 
update parent directory mapper 1577 // and synchronize the parent directory on IOC device 1578 if (parent_cxy == local_cxy) 1116 1579 { 1117 error = vfs_fs_remove_dentry( dentry_parent_ptr,1580 error = vfs_fs_remove_dentry( parent_ptr, 1118 1581 dentry_ptr ); 1119 1582 } 1120 else // dentry is remote1583 else 1121 1584 { 1122 rpc_vfs_fs_remove_dentry_client( dentry_cxy,1123 dentry_parent_ptr,1585 rpc_vfs_fs_remove_dentry_client( parent_cxy, 1586 parent_ptr, 1124 1587 dentry_ptr, 1125 1588 &error ); 1126 1589 } 1590 1127 1591 if( error ) 1128 1592 { 1129 printk("\n[ERROR] in %s : cannot update dentry on device for <%s>\n", path ); 1593 remote_rwlock_wr_release( lock_xp ); 1594 printk("\n[ERROR] in %s : cannot update dentry on device for <%s>\n", 1595 __FUNCTION__ , path ); 1130 1596 return -1; 1131 1597 } … … 1134 1600 if( DEBUG_VFS_UNLINK < cycle ) 1135 1601 printk("\n[%s] thread[%x,%x] removed <%s> inode from parent dir (mapper and IOC device)\n", 1136 __FUNCTION__, this->process->pid, this->trdid, path ); 1137 #endif 1138 // 3. remove inode (including mapper & dentry) from Inode Tree 1139 vfs_remove_child_from_parent( inode_xp ); 1602 __FUNCTION__, process->pid, this->trdid, path ); 1603 #endif 1604 // 3. 
remove dentry from Inode Tree (and associated chils inode when last link) 1605 vfs_remove_child_from_parent( dentry_xp ); 1606 1607 // release the lock protecting Inode Tree 1608 remote_rwlock_wr_release( lock_xp ); 1140 1609 1141 1610 #if DEBUG_VFS_UNLINK 1142 1611 if( DEBUG_VFS_UNLINK < cycle ) 1143 1612 printk("\n[%s] thread[%x,%x] exit / removed <%s> inode from Inode Tree / cycle %d\n", 1144 __FUNCTION__, this->process->pid, this->trdid, path, cycle );1613 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1145 1614 #endif 1146 1615 return 0; 1147 1616 } 1148 ///////////////////////////////// 1149 else if( type == INODE_TYPE_DIR ) 1150 { 1151 printk("\n[ERROR] in %s : unsupported type %s\n", vfs_inode_type_str( type ) ); 1617 else 1618 { 1619 remote_rwlock_wr_release( lock_xp ); 1620 printk("\n[ERROR] in %s : unsupported inode type %s\n", 1621 __FUNCTION__ , vfs_inode_type_str( inode_type ) ); 1152 1622 return -1; 1153 1623 } 1154 //// 1155 else 1156 { 1157 printk("\n[ERROR] in %s : unsupported type %s\n", vfs_inode_type_str( type ) ); 1624 1625 } // end vfs_unlink() 1626 1627 /////////////////////////////////////////// 1628 error_t vfs_stat( xptr_t root_inode_xp, 1629 char * path, 1630 stat_t * st ) 1631 { 1632 error_t error; 1633 xptr_t inode_xp; // extended pointer on target inode 1634 vfs_inode_t * inode_ptr; // local pointer on target inode 1635 cxy_t inode_cxy; // target inode cluster identifier 1636 xptr_t vfs_root_xp; // extended pointer on VFS root inode 1637 vfs_inode_t * vfs_root_ptr; // local_pointer on VFS root inode 1638 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1639 xptr_t lock_xp; // extended pointer on lock protecting Inode Tree 1640 1641 thread_t * this = CURRENT_THREAD; 1642 process_t * process = this->process; 1643 1644 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 1645 vfs_root_xp = process->vfs_root_xp; 1646 vfs_root_ptr = GET_PTR( vfs_root_xp ); 1647 vfs_root_cxy = GET_CXY( vfs_root_xp ); 
1648 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 1649 1650 // get the lock protecting Inode Tree in read mode 1651 remote_rwlock_rd_acquire( lock_xp ); 1652 1653 // get extended pointer on target inode 1654 error = vfs_lookup( root_inode_xp, 1655 path, 1656 0, 1657 &inode_xp, 1658 NULL ); 1659 1660 // release the lock protecting Inode Tree 1661 remote_rwlock_rd_release( lock_xp ); 1662 1663 if( error ) 1664 { 1665 printk("\n[ERROR] in %s : cannot found inode <%s>\n", 1666 __FUNCTION__ , path ); 1158 1667 return -1; 1159 1668 } 1160 1669 1161 } // end vfs_unlink()1162 1163 //////////////////////////////////////1164 error_t vfs_stat( xptr_t inode_xp,1165 stat_t * st )1166 {1167 1670 // get cluster and local pointer on inode descriptor 1168 vfs_inode_t *inode_ptr = GET_PTR( inode_xp );1169 cxy_tinode_cxy = GET_CXY( inode_xp );1671 inode_ptr = GET_PTR( inode_xp ); 1672 inode_cxy = GET_CXY( inode_xp ); 1170 1673 1171 1674 // get relevant infos from inode descriptor … … 1186 1689 #if DEBUG_VFS_STAT 1187 1690 uint32_t cycle = (uint32_t)hal_get_cycles(); 1188 thread_t * this = CURRENT_THREAD;1189 1691 if( DEBUG_VFS_STAT < cycle ) 1190 1692 printk("\n[%s] thread[%x,%x] set stat %x for inode %x in cluster %x / cycle %d\n" 1191 1693 " %s / inum %d / size %d\n", 1192 __FUNCTION__, this->process->pid, this->trdid, st, inode_ptr, inode_cxy, cycle,1694 __FUNCTION__, process->pid, this->trdid, st, inode_ptr, inode_cxy, cycle, 1193 1695 vfs_inode_type_str( type ), inum, size ); 1194 1696 #endif … … 1207 1709 } 1208 1710 1209 //////////////////////////////////////1210 error_t vfs_mkdir( xptr_t file_xp,1211 char * path,1212 uint32_t mode )1213 {1214 assert( false , "not implemented file_xp: %x, path <%s>, mode: %x\n",1215 file_xp, path, mode );1216 return 0;1217 }1218 1219 1711 //////////////////////////////////// 1220 1712 error_t vfs_rmdir( xptr_t file_xp, … … 1226 1718 } 1227 1719 1228 /////////////////////////////////// 1229 error_t vfs_chdir( xptr_t cwd_xp,1720 
//////////////////////////////////// 1721 error_t vfs_chdir( xptr_t root_xp, 1230 1722 char * path ) 1231 1723 { 1232 1724 error_t error; 1233 xptr_t inode_xp; // extended pointer on target inode 1234 cxy_t inode_cxy; // target inode cluster identifier 1235 vfs_inode_t * inode_ptr; // target inode local pointer 1236 uint32_t mode; // lookup working mode 1237 vfs_inode_type_t inode_type; // target inode type 1238 1239 // set lookup working mode 1240 mode = 0; 1725 xptr_t inode_xp; // extended pointer on target inode 1726 cxy_t inode_cxy; // target inode cluster identifier 1727 vfs_inode_t * inode_ptr; // target inode local pointer 1728 vfs_inode_type_t inode_type; // target inode type 1729 xptr_t vfs_root_xp; // extended pointer on VFS root inode 1730 vfs_inode_t * vfs_root_ptr; // local_pointer on VFS root inode 1731 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1732 xptr_t main_lock_xp; // extended pointer on lock protecting Inode Tree 1733 xptr_t ref_xp; // extended pointer on reference process 1734 process_t * ref_ptr; // local pointer on reference process 1735 cxy_t ref_cxy; // reference process cluster 1736 xptr_t cwd_lock_xp; // extended pointer on lock protecting CWD change 1737 xptr_t cwd_xp_xp; // extended pointer on cwd_xp in reference process 1738 1739 thread_t * this = CURRENT_THREAD; 1740 process_t * process = this->process; 1741 1742 #if DEBUG_VFS_CHDIR 1743 uint32_t cycle = (uint32_t)hal_get_cycles(); 1744 if( DEBUG_VFS_CHDIR < cycle ) 1745 printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle %d\n", 1746 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1747 #endif 1748 1749 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 1750 vfs_root_xp = process->vfs_root_xp; 1751 vfs_root_ptr = GET_PTR( vfs_root_xp ); 1752 vfs_root_cxy = GET_CXY( vfs_root_xp ); 1753 main_lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 1754 1755 // take lock protecting Inode Tree in read mode 1756 
remote_rwlock_rd_acquire( main_lock_xp ); 1241 1757 1242 1758 // get extended pointer on target inode 1243 error = vfs_lookup( cwd_xp , path , mode , &inode_xp ); 1244 1245 if( error ) return error; 1246 1247 // get inode cluster and local pointer 1759 error = vfs_lookup( root_xp, 1760 path, 1761 VFS_LOOKUP_DIR, 1762 &inode_xp, 1763 NULL ); 1764 1765 // release lock protecting Inode Tree in read mode 1766 remote_rwlock_rd_release( main_lock_xp ); 1767 1768 if( error ) 1769 { 1770 printk("\n[ERROR] in %s : <%s> not found\n", 1771 __FUNCTION__, path ); 1772 return -1; 1773 } 1774 1775 // get inode type from remote file 1248 1776 inode_cxy = GET_CXY( inode_xp ); 1249 1777 inode_ptr = GET_PTR( inode_xp ); 1250 1251 // get inode type from remote file1252 1778 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 1253 1779 1254 1780 if( inode_type != INODE_TYPE_DIR ) 1255 1781 { 1256 CURRENT_THREAD->errno = ENOTDIR; 1782 printk("\n[ERROR] in %s : <%s> is not a directory\n", 1783 __FUNCTION__, path ); 1257 1784 return -1; 1258 1785 } 1259 1786 1260 // TODO implement this function using process CWD lock 1261 1262 assert( false , "not implemented\n" ); 1787 // build extended pointer on cwd_lock and cwd_xp 1788 ref_xp = process->ref_xp; 1789 ref_ptr = GET_PTR( ref_xp ); 1790 ref_cxy = GET_CXY( ref_xp ); 1791 cwd_lock_xp = XPTR( ref_cxy , &ref_ptr->cwd_lock ); 1792 cwd_xp_xp = XPTR( ref_cxy , &ref_ptr->cwd_xp ); 1793 1794 // take lock protecting CWD changes 1795 remote_busylock_acquire( cwd_lock_xp ); 1796 1797 // update cwd_xp field in reference process descriptor 1798 hal_remote_s64( cwd_xp_xp , inode_xp ); 1799 1800 // release lock protecting CWD changes 1801 remote_busylock_release( cwd_lock_xp ); 1802 1803 #if DEBUG_VFS_CHDIR 1804 cycle = (uint32_t)hal_get_cycles(); 1805 if( DEBUG_VFS_CHDIR < cycle ) 1806 printk("\n[%s] thread[%x,%x] exit : inode (%x,%x) / &cwd_xp (%x,%x) / cycle %d\n", 1807 __FUNCTION__, process->pid, this->trdid, inode_cxy, inode_ptr, 
1808 GET_CXY(cwd_xp_xp), GET_PTR(cwd_xp_xp), cycle ); 1809 #endif 1263 1810 1264 1811 return 0; 1265 } 1812 1813 } // end vfs_chdir() 1266 1814 1267 1815 /////////////////////////////////// … … 1281 1829 1282 1830 // get extended pointer on target inode 1283 error = vfs_lookup( cwd_xp , path , 0 , &inode_xp ); 1831 error = vfs_lookup( cwd_xp, 1832 path, 1833 0, 1834 &inode_xp, 1835 NULL ); 1284 1836 1285 1837 if( error ) return error; … … 1292 1844 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 1293 1845 1294 1295 assert( false , "not implemented\n" ); 1846 // TODO implement this function 1847 1848 assert( false , "not implemented\n" ); 1849 1296 1850 return 0; 1297 1851 } … … 1360 1914 assert( (indent < 16) , "depth cannot be larger than 15\n" ); 1361 1915 1362 // get inode cluster and local pointer1916 // get current inode cluster and local pointer 1363 1917 inode_cxy = GET_CXY( inode_xp ); 1364 1918 inode_ptr = GET_PTR( inode_xp ); … … 1428 1982 cxy_t dentry_cxy; 1429 1983 vfs_dentry_t * dentry_ptr; 1984 xptr_t parents_root_xp; // root of parent dentries xlist 1430 1985 1431 1986 // get target inode cluster and local pointer … … 1433 1988 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 1434 1989 1435 // get extended pointer on associated dentry1436 dentry_xp = hal_remote_l64( XPTR( inode_cxy , &inode_ptr->parent_xp ));1437 1438 // check if target inode is the File System root1439 if( dentry_xp == XPTR_NULL )1990 // build extended pointer on parents dentries root 1991 parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1992 1993 // check VFS root 1994 if( xlist_is_empty( parents_root_xp ) ) // inode is the VFS root 1440 1995 { 1441 1996 // build extended pointer on root name … … 1444 1999 else 1445 2000 { 1446 // get dentry cluster and local pointer 2001 // get first parent dentry cluster and pointers 2002 dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 1447 2003 dentry_cxy = GET_CXY( dentry_xp ); 1448 2004 
dentry_ptr = GET_PTR( dentry_xp ); … … 1520 2076 xptr_t * child_xp ) 1521 2077 { 1522 xptr_t xhtab_xp; // extended pointer on hash table containing children dentries 1523 xptr_t dentry_xp; // extended pointer on children dentry 2078 xptr_t xhtab_xp; // extended pointer on hash table for children dentries 2079 xptr_t dentry_xp; // extended pointer on children dentry 2080 cxy_t dentry_cxy; 2081 vfs_dentry_t * dentry_ptr; 1524 2082 1525 2083 // get parent inode cluster and local pointer … … 1530 2088 xhtab_xp = XPTR( parent_cxy , &parent_ptr->children ); 1531 2089 1532 // search extended pointer on matching dentry 1533 dentry_xp = xhtab_lookup( xhtab_xp , name ); 1534 1535 if( dentry_xp == XPTR_NULL ) return false; 1536 1537 // get dentry cluster and local pointer 1538 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 1539 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 1540 1541 // return child inode 1542 *child_xp = (xptr_t)hal_remote_l64( XPTR( dentry_cxy , &dentry_ptr->child_xp ) ); 1543 return true; 2090 // get pointers on matching dentry 2091 dentry_xp = xhtab_lookup( xhtab_xp , name ); 2092 dentry_cxy = GET_CXY( dentry_xp ); 2093 dentry_ptr = GET_PTR( dentry_xp ); 2094 2095 if( dentry_xp == XPTR_NULL ) 2096 { 2097 return false; 2098 } 2099 else 2100 { 2101 *child_xp = (xptr_t)hal_remote_l64( XPTR( dentry_cxy , &dentry_ptr->child_xp ) ); 2102 return true; 2103 } 1544 2104 1545 2105 } // end vfs_get_child() … … 1553 2113 // last name in the path. The names are supposed to be separated by one or several '/' 1554 2114 // characters, that are not written in the <name> buffer. 2115 // 2116 // WARNING: the leading characters '/' in the path are skiped before analysis. 2117 // The path "/" identifies the VFS root, and is therefore anaysed as an empty 2118 // string. This empty string is dignaled by the (-1) return value. 
1555 2119 ////////////////////////////////////////////////////////////////////////////////////////// 1556 2120 // @ current : pointer on first character to analyse in buffer containing the path. … … 1558 2122 // @ next : [out] pointer on next character to analyse in buffer containing the path. 1559 2123 // @ last : [out] true if the returned name is the last (NUL character found). 1560 // @ return 0 if success / return EINVALif string empty (first chracter is NUL).2124 // @ return 0 if success / return -1 if string empty (first chracter is NUL). 1561 2125 ////////////////////////////////////////////////////////////////////////////////////////// 1562 2126 static error_t vfs_get_name_from_path( char * current, … … 1570 2134 while( *ptr == '/' ) ptr++; 1571 2135 1572 // return EINVAL if string empty 1573 if( *ptr == 0 ) return EINVAL; 2136 // signal empty string 2137 if( *ptr == 0 ) 2138 { 2139 *last = true; 2140 return -1; 2141 } 1574 2142 1575 2143 // copy all characters in name until NUL or '/' … … 1594 2162 } // end vfs_get name_from_path() 1595 2163 1596 ////////////////////////////////////////////// 1597 error_t vfs_lookup( xptr_t cwd_xp,2164 /////////////////////////////////////////////// 2165 error_t vfs_lookup( xptr_t root_xp, 1598 2166 char * pathname, 1599 uint32_t mode, 1600 xptr_t * inode_xp ) 2167 uint32_t lookup_mode, 2168 xptr_t * inode_xp, 2169 char * last_name ) 1601 2170 { 1602 2171 char name[CONFIG_VFS_MAX_NAME_LENGTH]; // one name in path … … 1619 2188 bool_t create; // searched inode must be created if not found 1620 2189 bool_t excl; // searched inode must not exist 2190 bool_t par; // searched inode is the parent 1621 2191 thread_t * this; // pointer on calling thread descriptor 1622 2192 process_t * process; // pointer on calling process descriptor … … 1626 2196 process = this->process; 1627 2197 2198 // check pathname / root_xp consistency 2199 assert( ((pathname[0] != '/') || (root_xp == process->vfs_root_xp)), 2200 "root inode must be VFS 
root for path <%s>\n", pathname ); 2201 1628 2202 #if DEBUG_VFS_LOOKUP 1629 2203 uint32_t cycle = (uint32_t)hal_get_cycles(); 2204 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2205 vfs_inode_get_name( root_xp , root_name ); 1630 2206 if( DEBUG_VFS_LOOKUP < cycle ) 1631 printk("\n[%s] thread[%x,%x] enter for <%s>/ cycle %d\n",1632 __FUNCTION__, process->pid, this->trdid, pathname, cycle );2207 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / mode %x / cycle %d\n", 2208 __FUNCTION__, process->pid, this->trdid, root_name, pathname, lookup_mode, cycle ); 1633 2209 #endif 1634 2210 1635 2211 // compute lookup flags 1636 dir = mode & VFS_LOOKUP_DIR; 1637 create = mode & VFS_LOOKUP_CREATE; 1638 excl = mode & VFS_LOOKUP_EXCL; 1639 1640 // get extended pointer on first inode to search 1641 if( pathname[0] == '/' ) parent_xp = process->vfs_root_xp; 1642 else parent_xp = cwd_xp; 1643 1644 // initialise other loop variables 1645 current = pathname; 1646 next = NULL; 1647 last = false; 1648 child_xp = XPTR_NULL; 1649 1650 // take lock on parent inode 1651 vfs_inode_lock( parent_xp ); 1652 1653 // sequencially loop on nodes in pathname 1654 // load from device if one node in path not found in inode tree 2212 dir = (lookup_mode & VFS_LOOKUP_DIR) == VFS_LOOKUP_DIR; 2213 create = (lookup_mode & VFS_LOOKUP_CREATE) == VFS_LOOKUP_CREATE; 2214 excl = (lookup_mode & VFS_LOOKUP_EXCL) == VFS_LOOKUP_EXCL; 2215 par = (lookup_mode & VFS_LOOKUP_PARENT) == VFS_LOOKUP_PARENT; 2216 2217 // initialise loop variables 2218 parent_xp = root_xp; 2219 current = pathname; 2220 next = NULL; 2221 last = false; 2222 child_xp = XPTR_NULL; 2223 2224 // loop on nodes in pathname 2225 // load from device if one node in path not found in Inode Tree 1655 2226 // exit loop when last name found (i.e. 
last == true) 1656 do 1657 { 1658 // get one name from path, and "last" flag 1659 vfs_get_name_from_path( current , name , &next , &last ); 2227 while( 1 ) 2228 { 2229 // get parent inode cluster and local pointer 2230 parent_cxy = GET_CXY( parent_xp ); 2231 parent_ptr = GET_PTR( parent_xp ); 2232 2233 // get one "name" from path, and "last" flag 2234 error = vfs_get_name_from_path( current , name , &next , &last ); 2235 2236 // VFS root case 2237 if ( error ) 2238 { 2239 2240 #if DEBUG_VFS_LOOKUP 2241 cycle = (uint32_t)hal_get_cycles(); 2242 if( DEBUG_VFS_LOOKUP < cycle ) 2243 printk("\n[%s] thread[%x,%x] exit / parent inode(%x,%x) / <%s> / cycle %d\n", 2244 __FUNCTION__ , process->pid, this->trdid, parent_cxy, parent_ptr, pathname, cycle ); 2245 #endif 2246 *inode_xp = process->vfs_root_xp; 2247 break; 2248 } 1660 2249 1661 2250 #if (DEBUG_VFS_LOOKUP & 1) … … 1665 2254 #endif 1666 2255 1667 // search achild dentry matching name in parent inode2256 // search the child dentry matching name in parent inode 1668 2257 found = vfs_get_child( parent_xp, 1669 2258 name, 1670 2259 &child_xp ); 1671 2260 1672 if (found == false ) // child not found in inode tree 2261 // analyse found & last, depending on lookup_mode 2262 if( found == false ) // not found in Inode Tree 1673 2263 { 2264 // when a inode is not found in the Inode Tree: 2265 // - if (last and par) the Inode Tree is not modified 2266 // - else we speculatively introduce a new (dentry/inode) in inode tree, 2267 // and scan the parent directory mapper to initialise it. 2268 // . if it is not found in the parent mapper: 2269 // - if(last and create), a brand new file or directory is created 2270 // - else, an error is reported 2271 // . 
if it is found in parent mapper: 2272 // - if( last and excl ), an error is reported 2273 // - else the new child (inode & dentry) is initialised in Inode Tree 2274 // - if the child is a directory, the child mapper is loaded from device 2275 2276 if( last && par ) // does nothing 2277 { 1674 2278 1675 2279 #if (DEBUG_VFS_LOOKUP & 1) 1676 2280 if( DEBUG_VFS_LOOKUP < cycle ) 1677 printk("\n[%s] thread[%x,%x] miss <%s> node => try to create it\n", 2281 printk("\n[%s] thread[%x,%x] child not found but only parent requested in <%s>\n", 2282 __FUNCTION__, process->pid, this->trdid, pathname ); 2283 #endif 2284 } 2285 else // try to get it from parent mapper 2286 { 2287 2288 #if (DEBUG_VFS_LOOKUP & 1) 2289 if( DEBUG_VFS_LOOKUP < cycle ) 2290 printk("\n[%s] thread[%x,%x] miss <%s> inode in Inode Tree => build from parent mapper\n", 1678 2291 __FUNCTION__, process->pid, this->trdid, name ); 1679 2292 #endif 1680 // if a child node is not found in the inode tree, 1681 // we introduce a new (dentry/inode) in inode tree, 1682 // and try to find it by scanning the parent directory mapper. 1683 // . if it is found in parent mapper: 1684 // - if the child is a directory, the child mapper is loaded from device 1685 // - if the child is not a directory, the search is completed 1686 // . 
if it is not found in the parent mapper: 1687 // - if ( not last or not create ) an error is reported 1688 // - if (last and create) a new file or directory is created 1689 1690 // release lock on parent inode 1691 vfs_inode_unlock( parent_xp ); 2293 // get parent inode FS type 2294 ctx_ptr = hal_remote_lpt( XPTR( parent_cxy,&parent_ptr->ctx ) ); 2295 fs_type = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) ); 2296 2297 // select a cluster for new inode 2298 child_cxy = cluster_random_select(); 2299 2300 // define child inode type 2301 if( dir ) child_type = INODE_TYPE_DIR; 2302 else child_type = INODE_TYPE_FILE; 1692 2303 1693 // get parent inode cluster and local pointer 1694 parent_cxy = GET_CXY( parent_xp ); 1695 parent_ptr = GET_PTR( parent_xp ); 1696 1697 // get parent inode FS type 1698 ctx_ptr = hal_remote_lpt( XPTR( parent_cxy,&parent_ptr->ctx ) ); 1699 fs_type = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) ); 1700 1701 // select a cluster for missing inode 1702 child_cxy = cluster_random_select(); 1703 1704 // define child inode type 1705 if( dir ) child_type = INODE_TYPE_DIR; 1706 else child_type = INODE_TYPE_FILE; 1707 1708 // insert a new child dentry/inode in inode tree 1709 error = vfs_add_child_in_parent( child_cxy, 1710 child_type, 1711 fs_type, 1712 parent_xp, 1713 name, 1714 &dentry_xp, 1715 &child_xp ); 1716 if( error ) 1717 { 1718 printk("\n[ERROR] in %s : cannot create node <%s> in path <%s>\n", 1719 __FUNCTION__ , name, pathname ); 1720 return -1; 1721 } 1722 1723 // get child inode local pointer 1724 child_ptr = GET_PTR( child_xp ); 2304 // insert (speculatively) a new child dentry/inode in inode tree 2305 error = vfs_add_child_in_parent( child_cxy, 2306 child_type, 2307 fs_type, 2308 parent_xp, 2309 name, 2310 &dentry_xp, 2311 &child_xp ); 2312 if( error ) 2313 { 2314 printk("\n[ERROR] in %s : cannot create inode <%s> in path <%s>\n", 2315 __FUNCTION__ , name, pathname ); 2316 return -1; 2317 } 2318 2319 // get child inode local 
pointer 2320 child_ptr = GET_PTR( child_xp ); 1725 2321 1726 2322 #if (DEBUG_VFS_LOOKUP & 1) … … 1729 2325 __FUNCTION__, process->pid, this->trdid, name, child_cxy ); 1730 2326 #endif 1731 // scan parent mapper to find the missing dentry, and complete 1732 // the initialisation of dentry and child inode desciptors 1733 if( parent_cxy == local_cxy ) 1734 1735 { 1736 error = vfs_fs_child_init( parent_ptr, 1737 name, 1738 child_xp ); 1739 } 1740 else 1741 { 1742 rpc_vfs_fs_child_init_client( parent_cxy, 1743 parent_ptr, 1744 name, 1745 child_xp, 1746 &error ); 1747 } 1748 1749 if ( error ) // child not found in parent mapper 1750 { 1751 if ( last && create ) // add a new dentry in parent 2327 // scan parent mapper to find the missing dentry, and complete 2328 // the initialisation of dentry and child inode desciptors 2329 if( parent_cxy == local_cxy ) 2330 1752 2331 { 1753 error = vfs_new_child_init( parent_xp, 1754 dentry_xp, 1755 child_xp ); 1756 if ( error ) 2332 error = vfs_fs_child_init( parent_ptr, 2333 name, 2334 child_xp ); 2335 } 2336 else 2337 { 2338 rpc_vfs_fs_child_init_client( parent_cxy, 2339 parent_ptr, 2340 name, 2341 child_xp, 2342 &error ); 2343 } 2344 2345 if ( error ) // child not found in parent mapper 2346 { 2347 if ( last && create ) // add a brand new dentry in parent 1757 2348 { 1758 printk("\n[ERROR] in %s : cannot init inode <%s> in path <%s>\n", 1759 __FUNCTION__, name, pathname ); 1760 vfs_remove_child_from_parent( child_xp ); 2349 error = vfs_new_child_init( parent_xp, 2350 dentry_xp, 2351 child_xp ); 2352 if ( error ) 2353 { 2354 printk("\n[ERROR] in %s : cannot init inode <%s> in path <%s>\n", 2355 __FUNCTION__, name, pathname ); 2356 vfs_remove_child_from_parent( dentry_xp ); 2357 return -1; 2358 } 2359 2360 #if (DEBUG_VFS_LOOKUP & 1) 2361 if( DEBUG_VFS_LOOKUP < cycle ) 2362 printk("\n[%s] thread[%x,%x] child <%s> not found in parent mapper => create it\n", 2363 __FUNCTION__, process->pid, this->trdid, name ); 2364 #endif 2365 } 2366 
else // not last or not create => error 2367 { 2368 printk("\n[ERROR] in %s : <%s> node not found in parent for <%s>\n", 2369 __FUNCTION__ , name , pathname ); 2370 vfs_remove_child_from_parent( dentry_xp ); 1761 2371 return -1; 1762 2372 } 2373 } 2374 else // child has been found in parent mapper 2375 { 2376 // check the excl 2377 if( last && create && excl ) 2378 { 2379 printk("\n[ERROR] in %s : node already exist <%s>\n", 2380 __FUNCTION__, name ); 2381 return -1; 2382 } 1763 2383 1764 2384 #if (DEBUG_VFS_LOOKUP & 1) 1765 2385 if( DEBUG_VFS_LOOKUP < cycle ) 1766 printk("\n[%s] thread[%x,%x] created inode <%s> in path\n",2386 printk("\n[%s] thread[%x,%x] initialised inode <%s> from parent mapper\n", 1767 2387 __FUNCTION__, process->pid, this->trdid, name ); 1768 2388 #endif 1769 } 1770 else // not last or not create => error 1771 { 1772 printk("\n[ERROR] in %s : <%s> node not found in parent for <%s>\n", 1773 __FUNCTION__ , name , pathname ); 1774 vfs_remove_child_from_parent( child_xp ); 1775 return ENOENT; 2389 // load child mapper from device if child is a directory (prefetch) 2390 uint32_t type = hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) ); 2391 if( type == INODE_TYPE_DIR ) 2392 { 2393 if( child_cxy == local_cxy ) 2394 { 2395 error = vfs_inode_load_all_pages( child_ptr ); 2396 } 2397 else 2398 { 2399 rpc_vfs_inode_load_all_pages_client( child_cxy, 2400 child_ptr, 2401 &error ); 2402 } 2403 if ( error ) 2404 { 2405 printk("\n[ERROR] in %s : cannot load <%s> from device\n", 2406 __FUNCTION__ , name ); 2407 vfs_remove_child_from_parent( dentry_xp ); 2408 return -1; 2409 } 2410 2411 #if (DEBUG_VFS_LOOKUP & 1) 2412 if( DEBUG_VFS_LOOKUP < cycle ) 2413 printk("\n[%s] thread[%x,%x] loaded directory mapper for <%s> from IOC\n", 2414 __FUNCTION__ , process->pid, this->trdid, name ); 2415 #endif 2416 } 1776 2417 } 1777 2418 } 1778 else // child found in parent mapper1779 {1780 // load child mapper from device if child is a directory (prefetch)1781 if( 
hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) ) == INODE_TYPE_DIR )1782 {1783 if( child_cxy == local_cxy )1784 {1785 error = vfs_inode_load_all_pages( child_ptr );1786 }1787 else1788 {1789 rpc_vfs_inode_load_all_pages_client( child_cxy,1790 child_ptr,1791 &error );1792 }1793 if ( error )1794 {1795 printk("\n[ERROR] in %s : cannot load <%s> from device\n",1796 __FUNCTION__ , name );1797 vfs_remove_child_from_parent( child_xp );1798 return EIO;1799 }1800 1801 #if (DEBUG_VFS_LOOKUP & 1)1802 if( DEBUG_VFS_LOOKUP < cycle )1803 printk("\n[%s] thread[%x,%x] loaded from IOC device mapper for <%s> in <%s>\n",1804 __FUNCTION__ , process->pid, this->trdid, name, pathname );1805 #endif1806 }1807 }1808 1809 // take lock on parent inode1810 vfs_inode_lock( parent_xp );1811 2419 } 1812 else // childfound in inode tree2420 else // child directly found in inode tree 1813 2421 { 1814 2422 1815 2423 #if (DEBUG_VFS_LOOKUP & 1) 1816 2424 if( DEBUG_VFS_LOOKUP < cycle ) 1817 printk("\n[%s] thread[%x,%x] found <%s> / inode %x in cluster %x\n", 1818 __FUNCTION__, process->pid, this->trdid, name, GET_PTR(child_xp), GET_CXY(child_xp) ); 1819 #endif 2425 printk("\n[%s] thread[%x,%x] found <%s> in Inode Tree / inode (%x,%x)\n", 2426 __FUNCTION__, process->pid, this->trdid, name, GET_CXY(child_xp), GET_PTR(child_xp) ); 2427 #endif 2428 // get child inode local pointer and cluster 1820 2429 child_ptr = GET_PTR( child_xp ); 1821 2430 child_cxy = GET_CXY( child_xp ); 1822 parent_cxy = GET_CXY( parent_xp ); 1823 parent_ptr = GET_PTR( parent_xp ); 1824 1825 if( last && (mode & VFS_LOOKUP_CREATE) && (mode & VFS_LOOKUP_EXCL) ) 2431 2432 // check the excl flag 2433 if( last && create && excl ) 1826 2434 { 1827 printk("\n[ERROR] in %s : node already exist <%s>\n", __FUNCTION__, name ); 1828 return EINVAL; 2435 printk("\n[ERROR] in %s : node <%s> already exist\n", 2436 __FUNCTION__, name ); 2437 return -1; 1829 2438 } 1830 2439 } … … 1842 2451 1843 2452 // take lock on child inode and release lock 
on parent 1844 vfs_inode_lock( child_xp ); 1845 vfs_inode_unlock( parent_xp ); 1846 1847 // update loop variables 1848 parent_xp = child_xp; 1849 current = next; 1850 } 1851 while( last == false ); 1852 1853 // release lock 1854 vfs_inode_unlock( parent_xp ); 2453 // vfs_inode_lock( child_xp ); 2454 // vfs_inode_unlock( parent_xp ); 2455 2456 // exit when last 2457 if ( last ) // last inode in path => return relevant info 2458 { 2459 if ( par ) // return parent inode and child name 2460 { 1855 2461 1856 2462 #if DEBUG_VFS_LOOKUP 1857 2463 cycle = (uint32_t)hal_get_cycles(); 1858 2464 if( DEBUG_VFS_LOOKUP < cycle ) 1859 printk("\n[%s] thread[%x,%x] exit for <%s> cycle %d\n", 1860 __FUNCTION__ , process->pid, this->trdid, pathname, cycle ); 1861 #endif 1862 1863 // return searched pointer 1864 if( mode & VFS_LOOKUP_PARENT ) *inode_xp = parent_xp; 1865 else *inode_xp = child_xp; 2465 printk("\n[%s] thread[%x,%x] exit / parent inode(%x,%x) / <%s> / cycle %d\n", 2466 __FUNCTION__ , process->pid, this->trdid, parent_cxy, parent_ptr, pathname, cycle ); 2467 #endif 2468 *inode_xp = parent_xp; 2469 strcpy( last_name , name ); 2470 break; 2471 } 2472 else // return child inode name 2473 { 2474 2475 #if DEBUG_VFS_LOOKUP 2476 cycle = (uint32_t)hal_get_cycles(); 2477 if( DEBUG_VFS_LOOKUP < cycle ) 2478 printk("\n[%s] thread[%x,%x] exit / child inode (%x,%x) / <%s> / cycle %d\n", 2479 __FUNCTION__ , process->pid, this->trdid, child_cxy, child_ptr, pathname, cycle ); 2480 #endif 2481 *inode_xp = child_xp; 2482 break; 2483 } 2484 } 2485 else // not the last inode in path => update loop variables 2486 { 2487 parent_xp = child_xp; 2488 current = next; 2489 } 2490 } 1866 2491 1867 2492 return 0; 1868 2493 1869 2494 } // end vfs_lookup() 1870 1871 1872 2495 1873 2496 /////////////////////////////////////////////// … … 1954 2577 cycle = (uint32_t)hal_get_cycles(); 1955 2578 if( DEBUG_VFS_NEW_CHILD_INIT < cycle ) 1956 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <% > / cycle 
%d\n",2579 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n", 1957 2580 __FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle ); 1958 2581 #endif … … 1962 2585 } // end vfs_new_child_init() 1963 2586 1964 //////////////////////////////////////////// 1965 error_t vfs_get_path( xptr_t searched_xp, 1966 char * buffer, 1967 uint32_t max_size ) 1968 { 1969 xptr_t dentry_xp; // extended pointer on current dentry 1970 char * name; // local pointer on current dentry name 1971 uint32_t length; // length of current dentry name 1972 uint32_t count; // number of characters written in buffer 1973 uint32_t index; // slot index in buffer 1974 xptr_t inode_xp; // extended pointer on 1975 1976 // implementation note: 1977 // we use two variables "index" and "count" because the buffer 1978 // is written in decreasing index order (from leaf to root) 1979 // TODO : handle conflict with a concurrent rename [AG] 1980 // FIXME : handle synchro in the loop [AG] 1981 1982 // set the NUL character in buffer / initialise buffer index and count 2587 ////////////////////////////////////////// 2588 error_t vfs_get_path( xptr_t inode_xp, 2589 char * buffer, 2590 char ** first, 2591 uint32_t max_size ) 2592 { 2593 xptr_t dentry_xp; // extended pointer on current dentry 2594 vfs_dentry_t * dentry_ptr; // local pointer on current dentry 2595 cxy_t dentry_cxy; // current dentry cluster identifier 2596 xptr_t name_xp; // extended pointer on current dentry name 2597 uint32_t length; // length of current dentry name 2598 int32_t index; // slot index in buffer 2599 xptr_t current_xp; // extended pointer on current inode 2600 vfs_inode_t * current_ptr; // local pointer on current inode 2601 cxy_t current_cxy; // current inode cluster identifier 2602 xptr_t vfs_root_xp; // extended pointer on VFS root inode 2603 vfs_inode_t * vfs_root_ptr; // local pointer on VFS root inode 2604 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 2605 xptr_t lock_xp; 
// extended pointer on Inode Tree lock 2606 xptr_t parents_root_xp; // extended pointer on current inode parents root 2607 bool_t found; // condition to exit the while loop 2608 2609 thread_t * this = CURRENT_THREAD; 2610 process_t * process = this->process; 2611 2612 #if DEBUG_VFS_GET_PATH 2613 uint32_t cycle = (uint32_t)hal_get_cycles(); 2614 if( DEBUG_VFS_GET_PATH < cycle ) 2615 printk("\n[%s] thread[%x,%x] enter : inode (%x,%x) / cycle %d\n", 2616 __FUNCTION__ , process->pid, this->trdid, 2617 GET_CXY( inode_xp ), GET_PTR( inode_xp ), cycle ); 2618 #endif 2619 2620 // set the NUL character in buffer / initialise buffer index 1983 2621 buffer[max_size - 1] = 0; 1984 count = 1; 1985 index = max_size - 2; 2622 index = (int32_t)(max_size - 1); 1986 2623 1987 2624 // initialize current inode 1988 inode_xp = searched_xp; 1989 1990 // exit when root inode found (i.e. dentry_xp == XPTR_NULL) 2625 current_xp = inode_xp; 2626 2627 // build extended pointer on lock protecting Inode Tree 2628 vfs_root_xp = process->vfs_root_xp; 2629 vfs_root_ptr = GET_PTR( vfs_root_xp ); 2630 vfs_root_cxy = GET_CXY( vfs_root_xp ); 2631 lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 2632 2633 // take lock protecting Inode Tree in read mode 2634 remote_rwlock_rd_acquire( lock_xp ); 2635 2636 // traverse Inode Tree from target inode to VFS root 2637 // selecting always the first parent dentry 2638 // the buffer is written in "reverse order" (from target inode to root) 2639 // exit the while loop when the VFS root has been found 1991 2640 do 1992 2641 { 1993 // get inode cluster and local pointer 1994 cxy_t inode_cxy = GET_CXY( inode_xp ); 1995 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 1996 1997 // get extended pointer on parent dentry 1998 dentry_xp = (xptr_t)hal_remote_l64( XPTR( inode_cxy , inode_ptr->parent_xp ) ); 1999 2000 // get dentry cluster and local pointer 2001 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 2002 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 2003 2004 
// get dentry name length and pointer 2005 length = hal_remote_l32( XPTR( dentry_cxy , &dentry_ptr->length ) ); 2006 name = (char *)hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->name ) ); 2007 2008 // update index and count 2009 index -= (length + 1); 2010 count += (length + 1); 2011 2012 // check buffer overflow 2013 if( count >= max_size ) 2642 // get current inode cluster and local pointer 2643 current_cxy = GET_CXY( current_xp ); 2644 current_ptr = GET_PTR( current_xp ); 2645 2646 // build extended pointer on parents dentries root 2647 parents_root_xp = XPTR( current_cxy , ¤t_ptr->parents ); 2648 2649 // compute exit condition <=> current inode is VFS root 2650 found = xlist_is_empty( parents_root_xp ); 2651 2652 if( found ) // parent is the VFS root 2014 2653 { 2015 printk("\n[ERROR] in %s : kernel buffer too small\n", __FUNCTION__ ); 2016 return EINVAL; 2654 if( index == (int32_t)(max_size - 1) ) 2655 { 2656 // update index 2657 index--; 2658 2659 // set separator 2660 buffer[index] = '/'; 2661 2662 // check buffer overflow 2663 assert( (index >= 0) , "kernel buffer too small\n" ); 2664 2665 } 2017 2666 } 2018 2019 // update pathname 2020 hal_remote_memcpy( XPTR( local_cxy , &buffer[index + 1] ) , 2021 XPTR( dentry_cxy , name ) , length ); 2022 buffer[index] = '/'; 2023 2024 // get extended pointer on next inode 2025 inode_xp = (xptr_t)hal_remote_l64( XPTR( dentry_cxy , dentry_ptr->parent ) ); 2026 } 2027 while( (dentry_xp != XPTR_NULL) ); 2028 2667 else // not the VFS root 2668 { 2669 // get first parent dentry cluster and pointers 2670 dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 2671 dentry_cxy = GET_CXY( dentry_xp ); 2672 dentry_ptr = GET_PTR( dentry_xp ); 2673 2674 // get extended pointer on dentry name and name length 2675 name_xp = XPTR( dentry_cxy , dentry_ptr->name ); 2676 length = hal_remote_l32( XPTR( dentry_cxy , &dentry_ptr->length ) ); 2677 2678 #if (DEBUG_VFS_GET_PATH & 1) 2679 char 
debug_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2680 hal_remote_strcpy( XPTR( local_cxy , debug_name ) , name_xp ); 2681 if( DEBUG_VFS_GET_PATH < cycle ) 2682 printk("\n[%s] thread(%x,%s) get current dentry <%s> in cluster %x\n", 2683 __FUNCTION__ , process->pid, this->trdid, debug_name, current_cxy ); 2684 #endif 2685 // update index 2686 index -= (length + 1); 2687 2688 // check buffer overflow 2689 assert( (index >= 0) , "kernel buffer too small\n" ); 2690 2691 // update pathname 2692 hal_remote_memcpy( XPTR( local_cxy , &buffer[index + 1] ) , 2693 name_xp , length ); 2694 2695 // set separator 2696 buffer[index] = '/'; 2697 2698 // get extended pointer on parent inode 2699 current_xp = XPTR( dentry_cxy , 2700 hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->parent ) ) ); 2701 } 2702 } 2703 while( found == false ); 2704 2705 // release lock protecting Inode Tree in read mode 2706 remote_rwlock_rd_release( lock_xp ); 2707 2708 #if DEBUG_VFS_GET_PATH 2709 cycle = (uint32_t)hal_get_cycles(); 2710 if( DEBUG_VFS_GET_PATH < cycle ) 2711 printk("\n[%s] thread[%x,%x] exit : path <%s> / cycle %d\n", 2712 __FUNCTION__ , process->pid, this->trdid, &buffer[index], cycle ); 2713 #endif 2714 2715 // return pointer on first character in buffer 2716 *first = &buffer[index]; 2029 2717 return 0; 2030 2718 … … 2033 2721 2034 2722 //////////////////////////////////////////////////////////////////// 2035 error_t vfs_add_child_in_parent( cxy_t child_ inode_cxy,2036 vfs_inode_type_t child_ inode_type,2723 error_t vfs_add_child_in_parent( cxy_t child_cxy, 2724 vfs_inode_type_t child_type, 2037 2725 vfs_fs_type_t fs_type, 2038 2726 xptr_t parent_inode_xp, 2039 2727 char * name, 2040 xptr_t * dentry_xp,2728 xptr_t * child_dentry_xp, 2041 2729 xptr_t * child_inode_xp ) 2042 2730 { 2043 error_t error; 2044 xptr_t new_dentry_xp; // extended pointer on created dentry 2045 vfs_dentry_t * new_dentry_ptr; // created dentry local pointer 2046 xptr_t new_inode_xp; // extended pointer on created child 
inode 2047 cxy_t parent_inode_cxy; // parent inode cluster identifier 2048 vfs_inode_t * parent_inode_ptr; // parent inode local pointer 2049 2050 // get parent inode cluster and local pointer 2051 parent_inode_cxy = GET_CXY( parent_inode_xp ); 2731 error_t error; 2732 cxy_t parent_cxy; // parent inode cluster identifier 2733 vfs_inode_t * parent_inode_ptr; // parent inode local pointer 2734 xptr_t new_dentry_xp; // extended pointer on created dentry 2735 vfs_dentry_t * new_dentry_ptr; // created dentry local pointer 2736 xptr_t new_inode_xp; // extended pointer on created child inode 2737 vfs_inode_t * new_inode_ptr; // local pointer on created child inode 2738 2739 xptr_t parents_root_xp; // extended pointer on child inode "parents" field 2740 xptr_t parents_entry_xp; // extended pointer on child dentry "parents" field 2741 xptr_t children_xhtab_xp; // extended pointer on parent inode "children" field 2742 xptr_t children_entry_xp; // extended pointer on child dentry "children" field 2743 2744 // get parent inode cluster and pointer 2745 parent_cxy = GET_CXY( parent_inode_xp ); 2052 2746 parent_inode_ptr = GET_PTR( parent_inode_xp ); 2053 2747 … … 2058 2752 thread_t * this = CURRENT_THREAD; 2059 2753 if( DEBUG_VFS_ADD_CHILD < cycle ) 2060 printk("\n[%s] thread[%x,%x] enter / child <%s> cxy %x / parent <%s> cxy %x/ cycle %d\n",2061 __FUNCTION__, this->process->pid, this->trdid, name, child_inode_cxy,2062 parent_name, parent_inode_cxy,(uint32_t)hal_get_cycles() );2063 #endif 2064 2065 // 1. create dentry 2066 if( parent_ inode_cxy == local_cxy ) // parent cluster is the local cluster2754 printk("\n[%s] thread[%x,%x] enter / child <%s> / parent <%s> / cycle %d\n", 2755 __FUNCTION__, this->process->pid, this->trdid, name, 2756 parent_name, (uint32_t)hal_get_cycles() ); 2757 #endif 2758 2759 // 1. 
create dentry in parent cluster 2760 if( parent_cxy == local_cxy ) // parent cluster is local 2067 2761 { 2068 2762 error = vfs_dentry_create( fs_type, 2069 2763 name, 2070 parent_inode_ptr,2071 2764 &new_dentry_xp ); 2072 2765 } 2073 else // parent cluster is remote2074 { 2075 rpc_vfs_dentry_create_client( parent_ inode_cxy,2766 else // parent cluster is remote 2767 { 2768 rpc_vfs_dentry_create_client( parent_cxy, 2076 2769 fs_type, 2077 2770 name, 2078 parent_inode_ptr,2079 2771 &new_dentry_xp, 2080 2772 &error ); … … 2084 2776 { 2085 2777 printk("\n[ERROR] in %s : cannot create dentry <%s> in cluster %x\n", 2086 __FUNCTION__ , name , parent_ inode_cxy );2778 __FUNCTION__ , name , parent_cxy ); 2087 2779 return -1; 2088 2780 } … … 2093 2785 #if(DEBUG_VFS_ADD_CHILD & 1) 2094 2786 if( DEBUG_VFS_ADD_CHILD < cycle ) 2095 printk("\n[%s] thread[%x,%x] / dentry <%s> created in cluster %x\n", 2096 __FUNCTION__, this->process->pid, this->trdid, name, parent_inode_cxy ); 2097 #endif 2098 2099 // 2. create child inode TODO : define attr / mode / uid / gid 2787 printk("\n[%s] thread[%x,%x] / dentry <%s> created (%x,%x)\n", 2788 __FUNCTION__, this->process->pid, this->trdid, name, parent_cxy, new_dentry_ptr ); 2789 #endif 2790 2791 // 2. 
create child inode in child cluster 2792 // TODO : define attr / mode / uid / gid 2100 2793 uint32_t attr = 0; 2101 2794 uint32_t mode = 0; … … 2103 2796 uint32_t gid = 0; 2104 2797 2105 if( child_inode_cxy == local_cxy ) // child cluster is the local cluster 2106 { 2107 error = vfs_inode_create( new_dentry_xp, 2108 fs_type, 2109 child_inode_type, 2798 if( child_cxy == local_cxy ) // child cluster is local 2799 { 2800 error = vfs_inode_create( fs_type, 2801 child_type, 2110 2802 attr, 2111 2803 mode, … … 2116 2808 else // child cluster is remote 2117 2809 { 2118 rpc_vfs_inode_create_client( child_inode_cxy, 2119 new_dentry_xp, 2810 rpc_vfs_inode_create_client( child_cxy, 2120 2811 fs_type, 2121 child_ inode_type,2812 child_type, 2122 2813 attr, 2123 2814 mode, … … 2131 2822 { 2132 2823 printk("\n[ERROR] in %s : cannot create inode in cluster %x\n", 2133 __FUNCTION__ , child_ inode_cxy );2824 __FUNCTION__ , child_cxy ); 2134 2825 2135 if( parent_ inode_cxy == local_cxy ) vfs_dentry_destroy( new_dentry_ptr );2136 else rpc_vfs_dentry_destroy_client( parent_ inode_cxy , new_dentry_ptr );2826 if( parent_cxy == local_cxy ) vfs_dentry_destroy( new_dentry_ptr ); 2827 else rpc_vfs_dentry_destroy_client( parent_cxy , new_dentry_ptr ); 2137 2828 return -1; 2138 2829 } 2139 2830 2831 // get new inode local pointer 2832 new_inode_ptr = GET_PTR( new_inode_xp ); 2833 2140 2834 #if(DEBUG_VFS_ADD_CHILD & 1) 2141 2835 if( DEBUG_VFS_ADD_CHILD < cycle ) 2142 printk("\n[%s] thread[%x,%x] / inode <%s> created in cluster %x\n", 2143 __FUNCTION__ , this->process->pid, this->trdid, name , child_inode_cxy ); 2144 #endif 2145 2146 // 3. 
update "child_xp" field in dentry and increment refcounts 2147 hal_remote_s64( XPTR( parent_inode_cxy , &new_dentry_ptr->child_xp ) , new_inode_xp ); 2148 vfs_inode_remote_up( new_inode_xp ); 2149 vfs_dentry_remote_up( new_dentry_xp ); 2836 printk("\n[%s] thread[%x,%x] / inode <%s> created (%x,%x)\n", 2837 __FUNCTION__ , this->process->pid, this->trdid, name , child_cxy, new_inode_ptr ); 2838 #endif 2839 2840 // 3. register new_dentry in new_inode xlist of parents 2841 parents_root_xp = XPTR( child_cxy , &new_inode_ptr->parents ); 2842 parents_entry_xp = XPTR( parent_cxy, &new_dentry_ptr->parents ); 2843 xlist_add_first( parents_root_xp , parents_entry_xp ); 2844 hal_remote_atomic_add( XPTR( child_cxy , &new_inode_ptr->links ) , 1 ); 2845 2846 #if(DEBUG_VFS_ADD_CHILD & 1) 2847 if( local_cxy == 1 ) 2848 // if( DEBUG_VFS_ADD_CHILD < cycle ) 2849 printk("\n[%s] thread[%x,%x] / dentry (%x,%x) registered in child inode (%x,%x)\n", 2850 __FUNCTION__, this->process->pid, this->trdid, 2851 parent_cxy, new_dentry_ptr, child_cxy, new_inode_ptr ); 2852 #endif 2853 2854 // 4. register new_dentry in parent_inode xhtab of children 2855 children_xhtab_xp = XPTR( parent_cxy , &parent_inode_ptr->children ); 2856 children_entry_xp = XPTR( parent_cxy , &new_dentry_ptr->children ); 2857 xhtab_insert( children_xhtab_xp , name , children_entry_xp ); 2858 2859 #if(DEBUG_VFS_ADD_CHILD & 1) 2860 if( DEBUG_VFS_ADD_CHILD < cycle ) 2861 printk("\n[%s] thread[%x,%x] / dentry (%x,%x) registered in parent inode (%x,%x)\n", 2862 __FUNCTION__, this->process->pid, this->trdid, 2863 parent_cxy, new_dentry_ptr, parent_cxy, parent_inode_ptr ); 2864 #endif 2865 2866 // 5. 
update "parent" and "child_xp" fields in new_dentry 2867 hal_remote_s64( XPTR( parent_cxy , &new_dentry_ptr->child_xp ) , new_inode_xp ); 2868 hal_remote_spt( XPTR( parent_cxy , &new_dentry_ptr->parent ) , parent_inode_ptr ); 2150 2869 2151 2870 #if DEBUG_VFS_ADD_CHILD … … 2157 2876 2158 2877 // return extended pointer on dentry & child inode 2159 * dentry_xp= new_dentry_xp;2160 *child_inode_xp = new_inode_xp;2878 *child_dentry_xp = new_dentry_xp; 2879 *child_inode_xp = new_inode_xp; 2161 2880 return 0; 2162 2881 2163 2882 } // end vfs_add_child_in_parent() 2164 2883 2165 //////////////////////////////////////////////////// 2166 void vfs_remove_child_from_parent( xptr_t inode_xp ) 2167 { 2168 cxy_t inode_cxy; 2169 vfs_inode_t * inode_ptr; 2170 xptr_t dentry_xp; 2171 cxy_t dentry_cxy; 2172 vfs_dentry_t * dentry_ptr; 2884 ///////////////////////////////////////////////////// 2885 void vfs_remove_child_from_parent( xptr_t dentry_xp ) 2886 { 2887 cxy_t parent_cxy; // parent inode cluster identifier 2888 cxy_t child_cxy; // child inode cluster identifier 2889 vfs_dentry_t * dentry_ptr; // local pointer on dentry 2890 xptr_t child_inode_xp; // extended pointer on child inode 2891 vfs_inode_t * child_inode_ptr; // local pointer on child inode 2892 vfs_inode_t * parent_inode_ptr; // local pointer on parent inode 2893 uint32_t links; // number of child inode parents 2894 2895 char dentry_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2173 2896 2174 // get inode cluster and local pointer 2175 inode_cxy = GET_CXY( inode_xp ); 2176 inode_ptr = GET_PTR( inode_xp ); 2177 2178 // get associated dentry cluster and pointers 2179 dentry_xp = hal_remote_l64( XPTR( inode_cxy , &inode_ptr->parent_xp ) ); 2180 dentry_cxy = GET_CXY( dentry_xp ); 2897 // get parent cluster and dentry local pointer 2898 parent_cxy = GET_CXY( dentry_xp ); 2181 2899 dentry_ptr = GET_PTR( dentry_xp ); 2182 2900 2183 // check dentry refcount 2184 assert( ( hal_remote_l32( XPTR( dentry_cxy , &dentry_ptr->refcount ) ) == 1 
), 2185 "dentry refcount must be 1\n" ); 2186 2187 // check inode refcount 2188 assert( ( hal_remote_l32( XPTR( inode_cxy , &inode_ptr->refcount ) ) == 1 ), 2189 "inode refcount must be 1\n" ); 2190 2191 // decrement refcount for inode and dentry 2192 vfs_inode_remote_down( inode_xp ); 2193 vfs_dentry_remote_down( dentry_xp ); 2194 2195 // delete dentry 2196 if( dentry_cxy == local_cxy ) 2901 // get a local copy of dentry name 2902 hal_remote_strcpy( XPTR( local_cxy , dentry_name ), 2903 XPTR( parent_cxy , &dentry_ptr->name ) ); 2904 2905 // get parent_inode local pointer 2906 parent_inode_ptr = hal_remote_lpt( XPTR( parent_cxy , &dentry_ptr->parent ) ); 2907 2908 // get child cluster and child_inode pointers 2909 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 2910 child_cxy = GET_CXY( child_inode_xp ); 2911 child_inode_ptr = GET_PTR( child_inode_xp ); 2912 2913 // remove dentry from parent_inode 2914 xhtab_remove( XPTR( parent_cxy , &parent_inode_ptr->children ), 2915 dentry_name, 2916 XPTR( parent_cxy , &dentry_ptr->children ) ); 2917 2918 // remove dentry from child_inode 2919 xlist_unlink( XPTR( parent_cxy , &dentry_ptr->parents ) ); 2920 links = hal_remote_atomic_add( XPTR( child_cxy , &child_inode_ptr->links ) , -1 ); 2921 2922 // delete dentry descriptor 2923 if( parent_cxy == local_cxy ) 2197 2924 { 2198 2925 vfs_dentry_destroy( dentry_ptr ); … … 2200 2927 else 2201 2928 { 2202 rpc_vfs_dentry_destroy_client( dentry_cxy,2929 rpc_vfs_dentry_destroy_client( parent_cxy, 2203 2930 dentry_ptr ); 2204 2931 } 2205 2932 2206 // delete inode 2207 if( inode_cxy == local_cxy ) 2208 { 2209 vfs_inode_destroy( inode_ptr ); 2210 } 2211 else 2212 { 2213 rpc_vfs_inode_destroy_client( inode_cxy, 2214 inode_ptr ); 2933 // delete child_inode descriptor if last link 2934 if( links == 1 ) 2935 { 2936 if( child_cxy == local_cxy ) 2937 { 2938 vfs_inode_destroy( child_inode_ptr ); 2939 } 2940 else 2941 { 2942 rpc_vfs_inode_destroy_client( child_cxy , 
child_inode_ptr ); 2943 } 2215 2944 } 2216 2945 … … 2374 3103 } // end vfs_fs_child_init() 2375 3104 3105 //////////////////////////////////////////////// 3106 error_t vfs_fs_sync_inode( vfs_inode_t * inode ) 3107 { 3108 error_t error = 0; 3109 3110 // check arguments 3111 assert( (inode != NULL) , "inode pointer is NULL\n"); 3112 3113 // get inode FS type 3114 vfs_fs_type_t fs_type = inode->ctx->type; 3115 3116 // call relevant FS function 3117 if( fs_type == FS_TYPE_FATFS ) 3118 { 3119 error = fatfs_sync_inode( inode ); 3120 } 3121 else if( fs_type == FS_TYPE_RAMFS ) 3122 { 3123 assert( false , "should not be called for RAMFS\n" ); 3124 } 3125 else if( fs_type == FS_TYPE_DEVFS ) 3126 { 3127 assert( false , "should not be called for DEVFS\n" ); 3128 } 3129 else 3130 { 3131 assert( false , "undefined file system type\n" ); 3132 } 3133 3134 return error; 3135 3136 } // end vfs_fs_sync_inode() 3137 3138 //////////////////////////////////////////////// 3139 error_t vfs_fs_sync_fat( vfs_fs_type_t fs_type ) 3140 { 3141 error_t error = 0; 3142 3143 // call relevant FS function 3144 if( fs_type == FS_TYPE_FATFS ) 3145 { 3146 error = fatfs_sync_fat(); 3147 } 3148 else if( fs_type == FS_TYPE_RAMFS ) 3149 { 3150 assert( false , "should not be called for RAMFS\n" ); 3151 } 3152 else if( fs_type == FS_TYPE_DEVFS ) 3153 { 3154 assert( false , "should not be called for DEVFS\n" ); 3155 } 3156 else 3157 { 3158 assert( false , "undefined file system type\n" ); 3159 } 3160 3161 return error; 3162 3163 } // end vfs_fs_sync_fat() 3164 3165 ////////////////////////////////////////////////////// 3166 error_t vfs_fs_sync_free_info( vfs_fs_type_t fs_type ) 3167 { 3168 error_t error = 0; 3169 3170 // call relevant FS function 3171 if( fs_type == FS_TYPE_FATFS ) 3172 { 3173 error = fatfs_sync_free_info(); 3174 } 3175 else if( fs_type == FS_TYPE_RAMFS ) 3176 { 3177 assert( false , "should not be called for RAMFS\n" ); 3178 } 3179 else if( fs_type == FS_TYPE_DEVFS ) 3180 { 3181 assert( false 
, "should not be called for DEVFS\n" ); 3182 } 3183 else 3184 { 3185 assert( false , "undefined file system type\n" ); 3186 } 3187 3188 return error; 3189 3190 } // end vfs_fs_sync_fat() 3191 2376 3192 ///////////////////////////////////////////////// 2377 3193 error_t vfs_fs_cluster_alloc( uint32_t fs_type, -
trunk/kernel/fs/vfs.h
r602 r610 69 69 *****************************************************************************************/ 70 70 71 #define VFS_LOOKUP_DIR 0x01 /* the searched inode is a directory*/71 #define VFS_LOOKUP_DIR 0x01 /* the searched inode must be a directory */ 72 72 #define VFS_LOOKUP_OPEN 0x02 /* the search is for an open/opendir */ 73 73 #define VFS_LOOKUP_PARENT 0x04 /* return the parent inode (not the inode itself) */ 74 74 #define VFS_LOOKUP_CREATE 0x10 /* file must be created if missing */ 75 #define VFS_LOOKUP_EXCL 0x20 /* file cannot previously exist */ 75 #define VFS_LOOKUP_EXCL 0x20 /* file cannot previously exist */ 76 76 77 77 /****************************************************************************************** … … 117 117 /****************************************************************************************** 118 118 * This structure define a VFS inode. 119 * It contains an extended pointer on the parent dentry, and (for directory only) 120 * an hash table (xhtab) registering all children dentries. 121 * The <parent> inode is unique for a directory (no hard links for directories). 122 * For a file, the parent field points to the first dentry who created this inode. 119 * An inode has several children dentries (if it is a directory), an can have several 120 * parents dentries (if it hass several aliases links): 121 * - The "parents" field is the root of the xlist of parents dentries, and the "links" 122 * fiels define the number of aliases parent dentries. only a FILE inode can have 123 * several parents (no hard links for directories). 124 * - The "children" field is an embedded xhtab containing pointers on all local children 125 * dentries. This set of children is empty for a FILE inode. 
123 126 * Synchronisation: 124 * - the main_lock (remote_ busylock) is used during the inode tree traversal,125 * or for inode modification (add/remove children ).126 * - the data_lock (remote_rwlock) is used during read/write accesses to the data127 * stored in the mapper.128 * - the mapper lock (remote rwlock) is only used during the radix tree traversal129 * to return the relevant page for read/write.127 * - the main_lock (remote_rwlock) is used during the inode tree traversal, 128 * or for inode modification (add/remove children in xhtab). 129 * - the size_lock (remote_rwlock) is used during read/write accesses to the size 130 * field in the mapper. 131 * - access to the data stored in the associated mapper use the mapper remote_rwlock 132 * protecting radix tree traversal and modifications. 130 133 *****************************************************************************************/ 131 134 … … 158 161 { 159 162 struct vfs_ctx_s * ctx; /*! local pointer on FS context */ 160 uint32_t gc; /*! generation counter */161 163 uint32_t inum; /*! inode identifier (unique in file system) */ 162 164 uint32_t attr; /*! inode attributes (see above) */ 163 165 vfs_inode_type_t type; /*! inode type (see above) */ 164 166 uint32_t size; /*! number of bytes */ 165 uint32_t links; /*! number of alias dentry */166 167 uint32_t uid; /*! user owner identifier */ 167 168 uint32_t gid; /*! group owner identifier */ 168 169 uint32_t rights; /*! access rights */ 169 uint32_t refcount; /*! reference counter (all pointers)*/170 xptr_t parent_xp; /*! extended pointer on parent dentry*/170 xlist_entry_t parents; /*! root of list of parents dentries */ 171 uint32_t links; /*! number of parent dentries (hard links) */ 171 172 xhtab_t children; /*! embedded xhtab of children dentries */ 172 remote_rwlock_t data_lock; /*! protect read/write to data and to size*/173 remote_ busylock_tmain_lock; /*! protect inode tree traversal and modifs */174 175 xlist_entry_twait_root; /*! 
root of threads waiting on this inode */173 remote_rwlock_t size_lock; /*! protect read/write to size */ 174 remote_rwlock_t main_lock; /*! protect inode tree traversal and modifs */ 175 // list_entry_t list; /*! member of set of inodes in same cluster */ 176 // list_entry_t wait_root; /*! root of threads waiting on this inode */ 176 177 struct mapper_s * mapper; /*! associated file cache */ 177 178 void * extend; /*! fs_type_specific inode extension */ … … 183 184 #define VFS_ISUID 0x0004000 184 185 #define VFS_ISGID 0x0002000 185 #define VFS_ISVTX 0x0001000186 define VFS_ISVTX 0x0001000 186 187 187 188 #define VFS_IRWXU 0x0000700 … … 203 204 * This structure defines a directory entry. 204 205 * A dentry contains the name of a remote file/dir, an extended pointer on the 205 * inode representing this file/dir, a nd alocal pointer on the inode representing206 * inode representing this file/dir, a local pointer on the inode representing 206 207 * the parent directory. 208 * A dentry can be member of the set of children of a given directory inode (xhtab). 209 * A dentry can be member of the set of parents of a given inode (xlist). 207 210 *****************************************************************************************/ 208 211 … … 212 215 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 213 216 uint32_t length; /*! name length (bytes) */ 214 uint32_t refcount; /*! reference counter (all pointers) */215 217 struct vfs_inode_s * parent; /*! local pointer on parent inode */ 216 218 xptr_t child_xp; /*! extended pointer on child inode */ 217 xlist_entry_t list; /*! member of list of dentries with same key */ 219 xlist_entry_t children; /*! member of set of children dentries */ 220 xlist_entry_t parents; /*! member of set of parent dentries */ 218 221 void * extend; /*! 
FS specific extension */ 219 222 } … … 301 304 302 305 303 304 306 /****************************************************************************************** 305 307 * These low-level functions access / modify a VFS inode descriptor … … 314 316 * This function allocates memory from local cluster for an inode descriptor and the 315 317 * associated mapper. It initialise these descriptors from arguments values. 316 * The parent dentry must have been previously created.317 318 * If the client thread is not running in the cluster containing this inode, 318 319 * it must use the rpc_vfs_inode_create_client() function. 319 320 ****************************************************************************************** 320 * @ dentry_xp : extended pointer on associated dentry (in parent inode cluster).321 321 * @ fs_type : file system type. 322 322 * @ inode_type : inode type. … … 328 328 * @ return 0 if success / return ENOMEM or EINVAL if error. 329 329 *****************************************************************************************/ 330 error_t vfs_inode_create( xptr_t dentry_xp, 331 vfs_fs_type_t fs_type, 330 error_t vfs_inode_create( vfs_fs_type_t fs_type, 332 331 vfs_inode_type_t inode_type, 333 332 uint32_t attr, … … 340 339 * This function releases memory allocated to an inode descriptor, including 341 340 * all memory allocated to the mapper (both mapper descriptor and radix tree). 342 * The mapper should not contain any dirty page (shold be synchronized before deletion), 343 * and the inode refcount must be zero. 341 * The mapper should not contain any dirty page (should be synchronized before deletion). 344 342 * It must be executed by a thread running in the cluster containing the inode. 345 343 * Use the rpc_vfs_inode_destroy_client() function if required. 
… … 348 346 *****************************************************************************************/ 349 347 void vfs_inode_destroy( vfs_inode_t * inode ); 350 351 /******************************************************************************************352 * This function atomically increment/decrement the inode refcount.353 * It can be called by any thread running in any cluster.354 *****************************************************************************************/355 void vfs_inode_remote_up( xptr_t inode_xp );356 void vfs_inode_remote_down( xptr_t inode_xp );357 348 358 349 /****************************************************************************************** … … 423 414 * This function allocates memory from local cluster for a dentry descriptor, 424 415 * initialises it from arguments values, and returns the extended pointer on dentry. 425 * The inode field is not initialized, because the inode does not exist yet.426 416 * If the client thread is not running in the target cluster for this inode, 427 417 * it must use the rpc_dentry_create_client() function. … … 429 419 * @ fs_type : file system type. 430 420 * @ name : directory entry file/dir name. 431 * @ parent : local pointer on parent inode.432 421 * @ dentry_xp : [out] buffer for extended pointer on created dentry. 433 422 * @ return 0 if success / return ENOMEM or EINVAL if error. … … 435 424 error_t vfs_dentry_create( vfs_fs_type_t fs_type, 436 425 char * name, 437 vfs_inode_t * parent,438 426 xptr_t * dentry_xp ); 439 427 440 428 /****************************************************************************************** 441 * This function re leases memory allocated to a dentry descriptor.442 * The dentry refcount must be zero.429 * This function removes the dentry from the parent inode xhtab, and releases the memory 430 * allocated to the dentry descriptor. 443 431 * It must be executed by a thread running in the cluster containing the dentry. 
444 432 * Use the rpc_vfs_dentry_destroy_client() function if required. … … 447 435 *****************************************************************************************/ 448 436 void vfs_dentry_destroy( vfs_dentry_t * dentry ); 449 450 /******************************************************************************************451 * These functions atomically increment/decrement the dentry refcount.452 * It can be called by any thread running in any cluster.453 *****************************************************************************************/454 void vfs_dentry_remote_up( xptr_t dentry_xp );455 void vfs_dentry_remote_down( xptr_t dentry_xp );456 437 457 438 … … 496 477 497 478 /****************************************************************************************** 498 * These functions access / modify the distributed VFS Inode Tree e499 *****************************************************************************************/ 500 501 /****************************************************************************************** 502 * This function returns in a kernel bufferallocated by the caller function,503 * the pathname of a file/dir identified by an extended pointer on the inode.479 * These functions access / modify the distributed VFS Inode Tree 480 *****************************************************************************************/ 481 482 /****************************************************************************************** 483 * This function returns in a kernel <buffer> allocated by the caller function, 484 * the pathname of a file/dir identified by the <inode_xp> argument. 504 485 * It traverse the Inode Tree from the target node to the root. 505 486 * It can be called by any thread running in any cluster. 506 ****************************************************************************************** 507 * @ inode_xp : pointer on inode descriptor. 508 * @ buffer : kernel buffer for pathname (must be allocated by caller). 
509 * @ size : max number of characters in buffer. 487 * As this buffer is filled in "reverse order" (i.e. from the target inode to the root), 488 * the pathname is stored in the higher part of the buffer. 489 * A pointer on the first character of the pathname is returned in <first> buffer. 490 * 491 * WARNING : This function takes & releases the remote_rwlock protecting the Inode Tree. 492 ****************************************************************************************** 493 * @ inode_xp : [in] extended pointer on target inode descriptor. 494 * @ buffer : [in] kernel buffer for pathname (allocated by caller). 495 * @ first : [out] pointer on first character in buffer. 496 * @ max_size : [in] max number of characters in buffer. 510 497 * @ return 0 if success / return EINVAL if buffer too small. 511 498 *****************************************************************************************/ 512 499 error_t vfs_get_path( xptr_t inode_xp, 513 500 char * buffer, 501 char ** first, 514 502 uint32_t max_size ); 515 503 516 504 /****************************************************************************************** 517 * This function takes a pathname (absolute or relative to cwd) and returns an extended 518 * pointer on the associated inode. 505 * This function traverses the Inode Tree, from inode identified by the <root_xp> 506 * argument, and returns in <inode_xp> the inode identified by the <pathname> argument. 507 * It can be called by a thread running in any cluster.
508 * It supports the following flags that define the lookup modes : 509 * - VFS_LOOKUP_DIR : the searched inode must be a directory 510 * - VFS_LOOKUP_OPEN : the search is for an open/opendir 511 * - VFS_LOOKUP_PARENT : return the parent inode (not the inode itself) 512 * - VFS_LOOKUP_CREATE : file/directory must be created if missing on IOC 513 * - VFS_LOOKUP_EXCL : file cannot previously exist 514 * As the inode Tree is a cache, the search policy is the following : 519 515 * - If a given directory name in the path is not found in the inode tree, it try to load 520 516 * the missing dentry/inode couple, from informations found in the parent directory. 521 * - If this directory entry does not exist on device, it returns an error. 522 * - If the the file identified by the pathname does not exist on device but the 523 * flag CREATE is set, the inode is created. 524 * - If the the file identified by the pathname exist on device but both flags EXCL 517 * - If this directory entry does not exist on IOC, it returns an error. 518 * - If the file identified by the pathname does not exist on IOC but the 519 * flag CREATE is set, the inode is created. It returns an error otherwise. 520 * - If the file identified by the pathname exist on device, but both flags EXCL 525 521 * and CREATE are set, an error is returned. 526 ****************************************************************************************** 527 * @ cwd_xp : extended pointer on current directory (for relative path). 528 * @ pathname : path in kernel space (can be relative or absolute). 529 * @ lookup_mode : flags defining the working mode (defined above in this file). 522 * - If the PARENT flag is set, it returns in <inode_xp> an extended pointer on the parent 523 * inode, and copies in <last_name> buffer a string containing the last name in path. 524 * 525 * WARNING : The remote_rwlock protecting the Inode Tree must be taken by the caller. 526 * 527 * TODO the access rights are not checked yet.
528 ****************************************************************************************** 529 * @ root_xp : [in] extended pointer on root inode (can be root of a subtree). 530 * @ pathname : [in] path (can be relative or absolute). 531 * @ lookup_mode : [in] flags defining the searching mode. 530 532 * @ inode_xp : [out] buffer for extended pointer on searched inode. 533 * @ last_name : [out] pointer on buffer for last name in path. 531 534 * @ return 0 if success / ENOENT if inode not found , EACCES if permisson denied, 532 * EAGAIN if a new complete lookup must be made 533 *****************************************************************************************/ 534 error_t vfs_lookup( xptr_t cwd_xp, 535 *****************************************************************************************/ 536 error_t vfs_lookup( xptr_t root_xp, 535 537 char * pathname, 536 538 uint32_t lookup_mode, 537 xptr_t * inode_xp ); 539 xptr_t * inode_xp, 540 char * last_name ); 538 541 539 542 /****************************************************************************************** 540 543 * This function creates a new couple dentry/inode, and insert it in the Inode-Tree. 544 * Only the distributed Inode Tree is modified: it does NOT modify the parent mapper, 545 * and does NOT update the FS on IOC device. 541 546 * It can be executed by any thread running in any cluster (can be different from both 542 * the child cluster and the parent cluster), as it uses RPCs if required. 543 * Only the distributed Inode Tree is modified: Even for a new file, this function 544 * does NOT modify the parent mapper, and does NOT update the FS on IOC device. 547 * the child cluster and the parent cluster). 545 548 * 546 549 * [Implementation] … … 563 566 * @ return 0 if success / -1 if dentry or inode cannot be created. 
564 567 *****************************************************************************************/ 565 error_t vfs_add_child_in_parent( cxy_t child_inode cxy,566 vfs_inode_type_t chil g_inode_type,568 error_t vfs_add_child_in_parent( cxy_t child_inode_cxy, 569 vfs_inode_type_t child_inode_type, 567 570 vfs_fs_type_t fs_type, 568 571 xptr_t parent_inode_xp, … … 572 575 573 576 /****************************************************************************************** 574 * This function removes a couple dentry/inode from the Inode-Tree. 575 * Both the inode and dentry references counters must be 1. 577 * This function removes a remote dentry from the Inode-Tree. 578 * - It removes the dentry from the parent inode xhtab ("children" field), and from the 579 * child inode xlist ("parents" field). 580 * - It releases the memory allocated to the dentry descriptor. 581 * - If the number of parents of the child inode is one, it also releases the memory 582 * allocated to the child inode. 583 * Only the Inode Tree is modified: it does NOT modify the parent mapper, 584 * and does NOT update the FS on IOC device. 576 585 * It can be executed by any thread running in any cluster (can be different from both 577 * the inode cluster and the dentry cluster) , as it uses RPCs if required.578 ****************************************************************************************** 579 * @ child_xp : extended pointer on removed inode.580 *****************************************************************************************/ 581 void vfs_remove_child_from_parent( xptr_t inode_xp );586 * the inode cluster and the dentry cluster). 587 ****************************************************************************************** 588 * @ dentry_xp : extended pointer on removed dentry. 
589 *****************************************************************************************/ 590 void vfs_remove_child_from_parent( xptr_t dentry_xp ); 582 591 583 592 /****************************************************************************************** … … 599 608 *****************************************************************************************/ 600 609 error_t vfs_new_child_init( xptr_t parent_xp, 601 xptr_t dentry_xp,602 xptr_t child_xp );610 xptr_t dentry_xp, 611 xptr_t child_xp ); 603 612 604 613 /****************************************************************************************** … … 661 670 /****************************************************************************************** 662 671 * This function allocates a vfs_file_t structure in the cluster containing the inode 663 * associated to the file identified by the <cwd_xp> & <path> arguments.672 * identified by the <root_xp> & <path> arguments. 664 673 * It initializes it, register it in the reference process fd_array identified by the 665 * <process > argument, and returns both the extended pointer on the file descriptor,666 * and the allocated index in the fd_array.674 * <process_xp> argument, and returns both the extended pointer on the file descriptor, 675 * and the allocated index in the <file_xp> and <file_id> buffers. 667 676 * The pathname can be relative to current directory or absolute. 668 * If the inode does not exist in the inode cache, it try to find the file on the mounted677 * If the inode does not exist in the inode cache, it try to find the file on the IOC 669 678 * device, and creates an inode on a pseudo randomly selected cluster if found. 670 679 * It the requested file does not exist on device, it creates a new inode if the 671 680 * O_CREAT flag is set, and return an error otherwise. 681 * 682 * WARNING : this function takes & releases the remote_rwlock protecting the Inode Tree. 
672 683 ****************************************************************************************** 673 * @ process : local pointer on local process descriptor copy.684 * @ root_xp : extended pointer on path root inode. 674 685 * @ path : file pathname (absolute or relative to current directory). 686 * @ process_xp : extended pointer on client reference process. 675 687 * @ flags : defined in vfs_file_t structure. 676 688 * @ mode : access rights (as defined by chmod). … … 679 691 * @ return 0 if success / return non-zero if error. 680 692 *****************************************************************************************/ 681 error_t vfs_open( struct process_s * process,693 error_t vfs_open( xptr_t root_xp, 682 694 char * path, 695 xptr_t process_xp, 683 696 uint32_t flags, 684 697 uint32_t mode, … … 721 734 /****************************************************************************************** 722 735 * This function is called by the kernel to create in the file system a new directory 723 * entry identified by the <cwd_xp> & <path_1>, linked to the node identified by the 724 * <cwd_xp> & <path_2> arguments. It can be any type of node. 725 * If the link is successful, the link count of the target node is incremented. 726 * <path_1> and <path_2> share equal access rights to the underlying object. 736 * identified by the <root_xp> & <path> arguments, with the access permission defined 737 * by the <rights> argument. All nodes in the path - but the last - must exist. 738 * 739 * WARNING : this function takes & releases the remote_rwlock protecting the Inode Tree. 740 ****************************************************************************************** 741 * @ root_xp : extended pointer on path root inode (any inode in Inode Tree). 742 * @ path : pathname (absolute or relative to current directory). 743 * @ rights : access rights. 744 * @ returns 0 if success / -1 if error. 
745 *****************************************************************************************/ 746 error_t vfs_mkdir( xptr_t root_xp, 747 char * path, 748 uint32_t rights ); 749 750 /****************************************************************************************** 751 * This function is called by the kernel to create in the file system a new directory 752 * entry identified by the <new_root_xp> & <new_path> arguments, to be linked to an 753 * existing inode, identified by the <old_root_xp> & <old_path> arguments. 754 * If the link is successful, the link count of the target inode is incremented. 755 * The <new_path> and <old_path> share equal access rights to the underlying inode. 727 756 * Both the IOC device and the Inode Tree are modified. 728 ****************************************************************************************** 729 * @ cwd_xp : extended pointer on current working directory file descriptor. 730 * @ path_1 : new pathname (absolute or relative to current directory). 731 * @ path_1 : existing pathname (absolute or relative to current directory). 757 $ 758 * TODO This function should handle any type of node, but the current implementation 759 * handles only the FILE and DIR types. 760 * 761 * WARNING : this function takes & releases the remote_rwlock protecting the Inode Tree. 762 ****************************************************************************************** 763 * @ old_root_xp : extended pointer on old path root inode (any inode in Inode Tree). 764 * @ old_path : old pathname (absolute or relative to current directory). 765 * @ nld_root_xp : extended pointer on new path root inode (any inode in Inode Tree). 766 * @ new_path : new pathname (absolute or relative to current directory). 732 767 * @ returns 0 if success / -1 if error. 
733 768 *****************************************************************************************/ 734 error_t vfs_link( xptr_t cwd_xp, 735 char * path_1, 736 char * path_2 ); 769 error_t vfs_link( xptr_t old_root_xp, 770 char * old_path, 771 xptr_t new_root_xp, 772 char * new_path ); 737 773 738 774 /****************************************************************************************** 739 775 * This function is called by the kernel to remove from the file system a directory entry 740 * identified by the < cwd_xp> & <path> arguments. The target node can be any type of node.741 * The link count of the target node is decremented. If the removed link is the last,742 * the targetnode is deleted.776 * identified by the <root_xp> & <path> arguments. 777 * The link count of the target node is decremented. 778 * If the removed link is the last, the target inode is deleted. 779 * Both the IOC device and the Inode Tree are modified. 744 ****************************************************************************************** 745 * @ cwd_xp : extended pointer on the current working directory file descriptor. 780 * 781 * TODO This function should handle any type of node, but the current implementation 782 * handles only the FILE and DIR types. 783 * 784 * WARNING : this function takes & releases the remote_rwlock protecting the Inode Tree. 785 ****************************************************************************************** 786 * @ root_xp : extended pointer on root inode (can be any inode in Inode Tree). 746 787 * @ path : pathname (absolute or relative to current directory). 747 788 * @ returns 0 if success / -1 if error.
748 789 *****************************************************************************************/ 749 error_t vfs_unlink( xptr_t cwd_xp,790 error_t vfs_unlink( xptr_t root_xp, 750 791 char * path ); 751 792 752 793 /****************************************************************************************** 753 * This function returns, in the structure pointed by the <st> pointer, 754 * various informations on the inode identified by the <inode_xp> argument. 755 * TODO : only partially implemented yet... 756 ****************************************************************************************** 757 * @ inode_xp : extended pointer on the remote inode. 794 * This function returns, in the structure pointed by the <st> pointer, various 795 * informations on the inode identified by the <root_inode_xp> and <patname> arguments. 796 * 797 * TODO : only partially implemented yet (only size and inum fields). 798 * 799 * WARNING : this function takes & releases the remote_rwlock protecting the Inode Tree. 800 ****************************************************************************************** 801 * @ root_xp : extended pointer on path root inode (any inode in Inode Tree) 802 * @ pathname : pathname to target inode. 758 803 * @ st : local pointer on the stat structure in kernel space. 759 804 * @ returns 0 if success / -1 if error. 760 805 *****************************************************************************************/ 761 error_t vfs_stat( xptr_t inode_xp, 806 error_t vfs_stat( xptr_t root_xp, 807 char * pathname, 762 808 struct stat * st ); 763 809 … … 775 821 776 822 /****************************************************************************************** 777 * This function creates a new inode and associated dentry for the directory defined 778 * by the <cwd_xp> & <path> arguments. 823 * This function creates a new directory as defined by the <root_xp> & <path> arguments. 779 824 * TODO not implemented yet... 
780 825 ****************************************************************************************** 781 * @ cwd_xp : extended pointer on the current working directory file descriptor.782 * @ path : pathname (absolute or relative to current directory).826 * @ root_xp : extended pointer on the path root directory. 827 * @ path : pathname (absolute or relative to CWD). 783 828 * @ mode : access rights (as defined by chmod) 784 829 * @ returns 0 if success / -1 if error. 785 830 *****************************************************************************************/ 786 error_t vfs_mkdir( xptr_t cwd_xp,831 error_t vfs_mkdir( xptr_t root_xp, 787 832 char * path, 788 833 uint32_t mode ); 789 834 790 835 /****************************************************************************************** 791 * This function makes the directory identified by <cwd_xp / path> arguments to become792 * t he working directory for the calling process.836 * This function makes the directory identified by the <root_xp and <path> arguments 837 * to become the working directory for the calling process. 793 838 ****************************************************************************************** 794 * @ cwd_xp : extended pointer on current directory file descriptor (relative path).795 * @ path : file pathname (absolute or relative to current directory).839 * @ root_xp : extended pointer on the path root directory. 840 * @ path : pathname (absolute or relative to CWD). 796 841 * return 0 if success / -1 if error. 797 842 *****************************************************************************************/ 798 error_t vfs_chdir( xptr_t cwd_xp,843 error_t vfs_chdir( xptr_t root_xp, 799 844 char * path ); 800 845 801 846 /****************************************************************************************** 802 * This function change the access rigths for the file identified by the <cwd_xp / path>803 * arguments. 
The new access rights are defined by the <mode> argument value.847 * This function changes the access rights for the file/directory identified by the 848 * <root_xp> and <path> arguments as defined by the <mode> argument value. 804 849 ****************************************************************************************** 805 * @ cwd_xp : extended pointer on current directory file descriptor (relative path). 806 * @ path : file pathname (absolute or relative to current directory). 807 * @ mode : access rights new value. 850 * @ root_xp : extended pointer on the path root directory. 851 * @ path : pathname (absolute or relative to CWD). 852 * @ mode : access rights 808 853 * return 0 if success / -1 if error. 809 854 *****************************************************************************************/ 810 855 error_t vfs_chmod( xptr_t cwd_xp,855 error_t vfs_chmod( xptr_t root_xp, 811 856 char * path, 812 857 uint32_t mode ); … … 816 861 * TODO not implemented yet 817 862 ****************************************************************************************** 818 * @ path : FIFO pathname (absolute or relative to current directory). 819 * @ cwd_xp : extended pointer on the current working directory file descriptor. 820 * @ mode 863 * @ root_xp : extended pointer on the path root directory. 864 * @ path : pathname (absolute or relative to CWD). 865 * @ mode : access rights new value.
866 *****************************************************************************************/ 867 error_t vfs_mkfifo( xptr_t root_xp, 823 868 char * path, 824 869 uint32_t mode ); … … 905 950 906 951 /***************************************************************************************** 907 * This function updates the FS on the IOC device for the FAT itself.908 * It scan all clusters registered in the FAT mapper, and copies to device909 * each page marked as dirty.952 * This function updates the FS defined by the <fs_type> argument on the IOC device 953 * for the FAT itself. It scan all clusters registered in the FAT mapper, and copies 954 * to device each page marked as dirty. 910 955 * 911 956 * Depending on the file system type, it calls the relevant, FS specific function. 912 957 * It can be called by a thread running in any cluster. 913 958 ***************************************************************************************** 959 * @ fs_type : specific file system type. 914 960 * @ return 0 if success / return EIO if failure during device access. 915 961 ****************************************************************************************/ 916 error_t vfs_fs_sync_fat( v oid);962 error_t vfs_fs_sync_fat( vfs_fs_type_t fs_type ); 917 963 918 964 /***************************************************************************************** 919 * This function updates the free clusters info on the IOC device. 965 * This function updates the free clusters info on the IOC device for the FS defined 966 * by the <fs_type> argument. 920 967 * 921 968 * Depending on the file system type, it calls the relevant, FS specific function. 922 969 * It can be called by a thread running in any cluster. 923 970 ***************************************************************************************** 971 * @ fs_type : specific file system type. 924 972 * @ return 0 if success / return EIO if failure during device access. 
925 973 ****************************************************************************************/ 926 error_t vfs_fs_sync_free_info( v oid);974 error_t vfs_fs_sync_free_info( vfs_fs_type_t fs_type ); 927 975 928 976 /****************************************************************************************** -
trunk/kernel/kern/do_syscall.c
r583 r610 2 2 * do_syscall.c - architecture independant entry-point for system calls. 3 3 * 4 * Author Alain Greiner (2016 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 61 61 sys_mutex, // 9 62 62 63 sys_ exit,// 1063 sys_rename, // 10 64 64 sys_munmap, // 11 65 65 sys_open, // 12 … … 104 104 sys_fg, // 48 105 105 sys_is_fg, // 49 106 107 sys_exit, // 50 106 108 }; 107 109 … … 122 124 case SYS_MUTEX : return "MUTEX"; // 9 123 125 124 case SYS_ EXIT: return "EXIT";// 10126 case SYS_RENAME: return "RENAME"; // 10 125 127 case SYS_MUNMAP: return "MUNMAP"; // 11 126 128 case SYS_OPEN: return "OPEN"; // 12 … … 165 167 case SYS_FG: return "FG"; // 48 166 168 case SYS_IS_FG: return "IS_FG"; // 49 169 170 case SYS_EXIT: return "EXIT"; // 50 171 167 172 default: return "undefined"; 168 173 } -
trunk/kernel/kern/kernel_init.c
r601 r610 146 146 147 147 "THREAD_JOIN", // 10 148 " VFS_MAIN",// 11148 "XHTAB_STATE", // 11 149 149 "CHDEV_QUEUE", // 12 150 150 "CHDEV_TXT0", // 13 … … 154 154 "CONDVAR_STATE", // 17 155 155 "SEM_STATE", // 18 156 " XHTAB_STATE", // 19156 "RPOCESS_CWD", // 19 157 157 158 158 "unused_20", // 20 … … 171 171 172 172 "MAPPER_STATE", // 30 173 " PROCESS_CWD",// 31174 "VFS_ INODE",// 32175 "V FS_FILE",// 33176 "VMM_ VSL", // 34177 "V MM_GPT",// 35173 "VFS_SIZE", // 31 174 "VFS_FILE", // 32 175 "VMM_VSL", // 33 176 "VMM_GPT", // 34 177 "VFS_MAIN", // 35 178 178 }; 179 179 … … 970 970 #if DEBUG_KERNEL_INIT 971 971 if( (core_lid == 0) & (local_cxy == 0) ) 972 printk("\n[%s] : exit barrier 0 : TXT0 initialized / sr %x /cycle %d\n",973 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );972 printk("\n[%s] : exit barrier 0 : TXT0 initialized / cycle %d\n", 973 __FUNCTION__, (uint32_t)hal_get_cycles() ); 974 974 #endif 975 975 … … 1012 1012 #if DEBUG_KERNEL_INIT 1013 1013 if( (core_lid == 0) & (local_cxy == 0) ) 1014 printk("\n[%s] : exit barrier 1 : clusters initialised / sr %x /cycle %d\n",1015 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1014 printk("\n[%s] : exit barrier 1 : clusters initialised / cycle %d\n", 1015 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1016 1016 #endif 1017 1017 … … 1039 1039 #if DEBUG_KERNEL_INIT 1040 1040 if( (core_lid == 0) & (local_cxy == 0) ) 1041 printk("\n[%s] : exit barrier 2 : PIC initialised / sr %x /cycle %d\n",1042 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1041 printk("\n[%s] : exit barrier 2 : PIC initialised / cycle %d\n", 1042 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1043 1043 #endif 1044 1044 … … 1072 1072 #if DEBUG_KERNEL_INIT 1073 1073 if( (core_lid == 0) & (local_cxy == 0) ) 1074 printk("\n[%s] : exit barrier 3 : all chdevs initialised / sr %x /cycle %d\n",1075 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1074 printk("\n[%s] : exit barrier 3 
: all chdevs initialised / cycle %d\n", 1075 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1076 1076 #endif 1077 1077 … … 1136 1136 1137 1137 // 4. create VFS root inode in cluster 0 1138 error = vfs_inode_create( XPTR_NULL, // dentry_xp 1139 FS_TYPE_FATFS, // fs_type 1138 error = vfs_inode_create( FS_TYPE_FATFS, // fs_type 1140 1139 INODE_TYPE_DIR, // inode_type 1141 1140 0, // attr … … 1174 1173 // register VFS root inode in process_zero descriptor of cluster 0 1175 1174 process_zero.vfs_root_xp = vfs_root_inode_xp; 1176 process_zero. vfs_cwd_xp= vfs_root_inode_xp;1175 process_zero.cwd_xp = vfs_root_inode_xp; 1177 1176 } 1178 1177 … … 1185 1184 #if DEBUG_KERNEL_INIT 1186 1185 if( (core_lid == 0) & (local_cxy == 0) ) 1187 printk("\n[%s] : exit barrier 4 : VFS root initialized in cluster 0 / sr %x / cycle %d\n", 1188 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1186 printk("\n[%s] : exit barrier 4 : VFS root (%x,%x) in cluster 0 / cycle %d\n", 1187 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1188 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); 1189 1189 #endif 1190 1190 … … 1243 1243 // update local process_zero descriptor 1244 1244 process_zero.vfs_root_xp = vfs_root_inode_xp; 1245 process_zero. 
vfs_cwd_xp= vfs_root_inode_xp;1245 process_zero.cwd_xp = vfs_root_inode_xp; 1246 1246 } 1247 1247 … … 1254 1254 #if DEBUG_KERNEL_INIT 1255 1255 if( (core_lid == 0) & (local_cxy == 1) ) 1256 printk("\n[%s] : exit barrier 5 : VFS root initialized in cluster 1 / sr %x / cycle %d\n", 1257 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1256 printk("\n[%s] : exit barrier 4 : VFS root (%x,%x) in cluster 1 / cycle %d\n", 1257 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1258 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); 1258 1259 #endif 1259 1260 … … 1303 1304 #if DEBUG_KERNEL_INIT 1304 1305 if( (core_lid == 0) & (local_cxy == 0) ) 1305 printk("\n[%s] : exit barrier 6 : DEVFS root initialized in cluster 0 / sr %x /cycle %d\n",1306 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1306 printk("\n[%s] : exit barrier 6 : DEVFS root initialized in cluster 0 / cycle %d\n", 1307 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1307 1308 #endif 1308 1309 … … 1340 1341 #if DEBUG_KERNEL_INIT 1341 1342 if( (core_lid == 0) & (local_cxy == 0) ) 1342 printk("\n[%s] : exit barrier 7 : DEV initialized in cluster 0 / sr %x /cycle %d\n",1343 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1343 printk("\n[%s] : exit barrier 7 : DEV initialized in cluster 0 / cycle %d\n", 1344 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1344 1345 #endif 1345 1346 … … 1366 1367 #if DEBUG_KERNEL_INIT 1367 1368 if( (core_lid == 0) & (local_cxy == 0) ) 1368 printk("\n[%s] : exit barrier 8 : process init created / sr %x /cycle %d\n",1369 __FUNCTION__, (uint32_t)hal_get_ sr(), (uint32_t)hal_get_cycles() );1369 printk("\n[%s] : exit barrier 8 : process init created / cycle %d\n", 1370 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1370 1371 #endif 1371 1372 … … 1436 1437 dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD ); 1437 1438 1439 ///////////////////////////////////////////////////////////////////////////////// 1440 if( core_lid == 0 ) 
xbarrier_wait( XPTR( 0 , &global_barrier ), 1441 (info->x_size * info->y_size) ); 1442 barrier_wait( &local_barrier , info->cores_nr ); 1443 ///////////////////////////////////////////////////////////////////////////////// 1444 1438 1445 #if DEBUG_KERNEL_INIT 1439 printk("\n[%s] : thread %x on core[%x,%d] jumps to thread_idle_func() / cycle %d\n", 1440 __FUNCTION__ , CURRENT_THREAD , local_cxy , core_lid , (uint32_t)hal_get_cycles() ); 1446 thread_t * this = CURRENT_THREAD; 1447 printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n", 1448 __FUNCTION__ , this->process->pid, this->trdid, 1449 local_cxy, core_lid, (uint32_t)hal_get_cycles() ); 1441 1450 #endif 1442 1451 1443 1452 // each core jump to thread_idle_func 1444 1453 thread_idle_func(); 1445 } 1446 1454 1455 } // end kernel_init() 1456 -
trunk/kernel/kern/process.c
r593 r610 95 95 xptr_t parent_xp ) 96 96 { 97 xptr_t process_xp; 97 98 cxy_t parent_cxy; 98 99 process_t * parent_ptr; … … 113 114 pid_t parent_pid; 114 115 116 // build extended pointer on this reference process 117 process_xp = XPTR( local_cxy , process ); 118 115 119 // get parent process cluster and local pointer 116 120 parent_cxy = GET_CXY( parent_xp ); … … 121 125 122 126 #if DEBUG_PROCESS_REFERENCE_INIT 127 thread_t * this = CURRENT_THREAD; 123 128 uint32_t cycle = (uint32_t)hal_get_cycles(); 124 if( DEBUG_PROCESS_REFERENCE_INIT )125 printk("\n[%s] thread %x in process %xenter to initalialize process %x / cycle %d\n",126 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid , pid, cycle );127 #endif 128 129 // initialize PID, REF_XP, PARENT_XP, and STATE129 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 130 printk("\n[%s] thread[%x,%x] enter to initalialize process %x / cycle %d\n", 131 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 132 #endif 133 134 // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields 130 135 process->pid = pid; 131 136 process->ref_xp = XPTR( local_cxy , process ); … … 134 139 process->term_state = 0; 135 140 141 // initialize VFS root inode and CWD inode 142 process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) ); 143 process->cwd_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) ); 144 136 145 // initialize vmm as empty 137 146 error = vmm_init( process ); … … 141 150 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 142 151 cycle = (uint32_t)hal_get_cycles(); 143 if( DEBUG_PROCESS_REFERENCE_INIT )144 printk("\n[%s] thread %x in process %x/ vmm empty for process %x / cycle %d\n",145 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );152 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 153 printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 154 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 146 155 #endif 147 156 … … 161 170 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 162 
171 cycle = (uint32_t)hal_get_cycles(); 163 if( DEBUG_PROCESS_REFERENCE_INIT )164 printk("\n[%s] thread %x in process %x/ process %x attached to TXT%d / cycle %d\n",165 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, txt_id, cycle );172 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 173 printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 174 __FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle ); 166 175 #endif 167 176 // build path to TXT_RX[i] and TXT_TX[i] chdevs … … 170 179 171 180 // create stdin pseudo file 172 error = vfs_open( process,181 error = vfs_open( process->vfs_root_xp, 173 182 rx_path, 183 process_xp, 174 184 O_RDONLY, 175 185 0, // FIXME chmod … … 182 192 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 183 193 cycle = (uint32_t)hal_get_cycles(); 184 if( DEBUG_PROCESS_REFERENCE_INIT )185 printk("\n[%s] thread %x in process %x/ stdin open for process %x / cycle %d\n",186 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );194 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 195 printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 196 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 187 197 #endif 188 198 189 199 // create stdout pseudo file 190 error = vfs_open( process,200 error = vfs_open( process->vfs_root_xp, 191 201 tx_path, 202 process_xp, 192 203 O_WRONLY, 193 204 0, // FIXME chmod … … 200 211 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 201 212 cycle = (uint32_t)hal_get_cycles(); 202 if( DEBUG_PROCESS_REFERENCE_INIT )203 printk("\n[%s] thread %x in process %x/ stdout open for process %x / cycle %d\n",204 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );213 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 214 printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 215 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 205 216 #endif 206 217 207 218 // create stderr pseudo file 208 error = vfs_open( process,219 error = vfs_open( process->vfs_root_xp, 209 220 tx_path, 221 
process_xp, 210 222 O_WRONLY, 211 223 0, // FIXME chmod … … 218 230 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 219 231 cycle = (uint32_t)hal_get_cycles(); 220 if( DEBUG_PROCESS_REFERENCE_INIT )221 printk("\n[%s] thread %x in process %x/ stderr open for process %x / cycle %d\n",222 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );232 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 233 printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 234 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 223 235 #endif 224 236 … … 247 259 } 248 260 249 // initialize specific inodes root and cwd 250 process->vfs_root_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy, 251 &parent_ptr->vfs_root_xp ) ); 252 process->vfs_cwd_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy, 253 &parent_ptr->vfs_cwd_xp ) ); 254 vfs_inode_remote_up( process->vfs_root_xp ); 255 vfs_inode_remote_up( process->vfs_cwd_xp ); 256 257 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD ); 261 // initialize lock protecting CWD changes 262 remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD ); 258 263 259 264 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 260 265 cycle = (uint32_t)hal_get_cycles(); 261 if( DEBUG_PROCESS_REFERENCE_INIT )262 printk("\n[%s] thread %x in process %x/ set fd_array for process %x / cycle %d\n",263 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid , cycle );266 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 267 printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 268 __FUNCTION__, parent_pid, this->trdid, pid , cycle ); 264 269 #endif 265 270 … … 300 305 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 301 306 cycle = (uint32_t)hal_get_cycles(); 302 if( DEBUG_PROCESS_REFERENCE_INIT )303 printk("\n[%s] thread %x in process %xexit for process %x / cycle %d\n",304 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle );307 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 308 printk("\n[%s] thread[%x,%x] exit for 
process %x / cycle %d\n", 309 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 305 310 #endif 306 311 … … 325 330 326 331 #if DEBUG_PROCESS_COPY_INIT 327 thread_t * this = CURRE T_THREAD;332 thread_t * this = CURRENT_THREAD; 328 333 uint32_t cycle = (uint32_t)hal_get_cycles(); 329 if( DEBUG_PROCESS_COPY_INIT )330 printk("\n[%s] thread %x in process %xenter for process %x / cycle %d\n",331 __FUNCTION__, this-> trdid, this->process->pid, local_process->pid, cycle );334 if( DEBUG_PROCESS_COPY_INIT < cycle ) 335 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", 336 __FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle ); 332 337 #endif 333 338 … … 342 347 process_fd_init( local_process ); 343 348 344 // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields349 // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields 345 350 local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) ); 346 351 local_process->vfs_bin_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) ); 347 local_process-> vfs_cwd_xp= XPTR_NULL;352 local_process->cwd_xp = XPTR_NULL; 348 353 349 354 // reset children list root (not used in a process descriptor copy) … … 382 387 #if DEBUG_PROCESS_COPY_INIT 383 388 cycle = (uint32_t)hal_get_cycles(); 384 if( DEBUG_PROCESS_COPY_INIT )385 printk("\n[%s] thread %x in process %xexit for process %x / cycle %d\n",386 __FUNCTION__, this-> trdid, this->process->pid, local_process->pid, cycle );389 if( DEBUG_PROCESS_COPY_INIT < cycle ) 390 printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 391 __FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle ); 387 392 #endif 388 393 … … 406 411 407 412 #if DEBUG_PROCESS_DESTROY 413 thread_t * this = CURRENT_THREAD; 408 414 uint32_t cycle = (uint32_t)hal_get_cycles(); 409 if( DEBUG_PROCESS_DESTROY )410 printk("\n[%s] thread %x in process %xenter for process %x in cluster %x / cycle %d\n",411 __FUNCTION__, CURRENT_THREAD->trdid, 
CURRENT_THREAD->process->pid, pid, local_cxy, cycle );415 if( DEBUG_PROCESS_DESTROY < cycle ) 416 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 417 __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); 412 418 #endif 413 419 … … 446 452 if( process->vfs_bin_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp ); 447 453 if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp ); 448 if( process-> vfs_cwd_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_cwd_xp );454 if( process->cwd_xp != XPTR_NULL ) vfs_file_count_down( process->cwd_xp ); 449 455 450 456 // Destroy VMM … … 456 462 #if DEBUG_PROCESS_DESTROY 457 463 cycle = (uint32_t)hal_get_cycles(); 458 if( DEBUG_PROCESS_DESTROY )459 printk("\n[%s] thread %x in process %xexit / process %x in cluster %x / cycle %d\n",460 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle );464 if( DEBUG_PROCESS_DESTROY < cycle ) 465 printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n", 466 __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle ); 461 467 #endif 462 468 … … 561 567 process_ptr = GET_PTR( process_xp ); 562 568 563 // printk("\n@@@ in %s : process_cxy %x / process_ptr %x / pid %x\n",564 // __FUNCTION__, process_cxy, process_ptr, hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );565 566 569 if( process_cxy == local_cxy ) // process copy is local 567 570 { … … 652 655 assert( (LPID_FROM_PID( process->pid ) != 0 ), "target process must be an user process" ); 653 656 654 // get target process cluster657 // get target process owner cluster 655 658 owner_cxy = CXY_FROM_PID( process->pid ); 656 659 … … 697 700 while( 1 ) 698 701 { 699 // exit when all scheduler ackno ledges received702 // exit when all scheduler acknowledges received 700 703 if ( ack_count == 0 ) break; 701 704 … … 927 930 uint32_t fd; 928 931 932 // initialize lock 929 933 
remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY ); 930 934 935 // initialize number of open files 931 936 process->fd_array.current = 0; 932 937 … … 937 942 } 938 943 } 939 ///////////////////////////////////////////////// 940 error_t process_fd_register( process_t * process,944 //////////////////////////////////////////////////// 945 error_t process_fd_register( xptr_t process_xp, 941 946 xptr_t file_xp, 942 947 uint32_t * fdid ) … … 944 949 bool_t found; 945 950 uint32_t id; 946 uint32_t count;947 951 xptr_t xp; 948 952 949 953 // get reference process cluster and local pointer 950 xptr_t ref_xp = process->ref_xp; 951 process_t * ref_ptr = GET_PTR( ref_xp ); 952 cxy_t ref_cxy = GET_CXY( ref_xp ); 954 process_t * process_ptr = GET_PTR( process_xp ); 955 cxy_t process_cxy = GET_CXY( process_xp ); 956 957 // check client process is reference process 958 assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ), 959 "client process must be reference process\n" ); 960 961 #if DEBUG_PROCESS_FD_REGISTER 962 thread_t * this = CURRENT_THREAD; 963 uint32_t cycle = (uint32_t)hal_get_cycles(); 964 pid_t pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) ); 965 if( DEBUG_PROCESS_FD_REGISTER < cycle ) 966 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", 967 __FUNCTION__, this->process->pid, this->trdid, pid, cycle ); 968 #endif 969 970 // build extended pointer on lock protecting reference fd_array 971 xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock ); 953 972 954 973 // take lock protecting reference fd_array 955 remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->fd_array.lock ));974 remote_queuelock_acquire( lock_xp ); 956 975 957 976 found = false; … … 959 978 for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ ) 960 979 { 961 xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );980 xp = hal_remote_l64( XPTR( process_cxy , 
&process_ptr->fd_array.array[id] ) ); 962 981 if ( xp == XPTR_NULL ) 963 982 { 964 983 // update reference fd_array 965 hal_remote_s64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp ); 966 count = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ) + 1; 967 hal_remote_s32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , count ); 968 969 // update local fd_array copy if required 970 if( ref_cxy != local_cxy ) 971 { 972 process->fd_array.array[id] = file_xp; 973 process->fd_array.current = count; 974 } 984 hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp ); 985 hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 ); 975 986 976 987 // exit … … 981 992 } 982 993 983 // release lock protecting reference fd_array 984 remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) ); 994 // release lock protecting fd_array 995 remote_queuelock_release( lock_xp ); 996 997 #if DEBUG_PROCESS_FD_REGISTER 998 cycle = (uint32_t)hal_get_cycles(); 999 if( DEBUG_PROCESS_FD_REGISTER < cycle ) 1000 printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n", 1001 __FUNCTION__, this->process->pid, this->trdid, pid, id, cycle ); 1002 #endif 985 1003 986 1004 if ( !found ) return -1; 987 1005 else return 0; 988 } 1006 1007 } // end process_fd_register() 989 1008 990 1009 //////////////////////////////////////////////// … … 1119 1138 // returns trdid 1120 1139 *trdid = TRDID( local_cxy , ltid ); 1121 1122 // if( LPID_FROM_PID( process->pid ) == 0 )1123 // printk("\n@@@ %s : allocate ltid %d for a thread %s in cluster %x\n",1124 // __FUNCTION__, ltid, thread_type_str( thread->type), local_cxy );1125 1126 1140 } 1127 1141 … … 1158 1172 process->th_tbl[ltid] = NULL; 1159 1173 process->th_nr = count-1; 1160 1161 // if( LPID_FROM_PID( process->pid ) == 0 )1162 // printk("\n@@@ %s : release ltid %d for a thread %s in cluster %x\n",1163 // __FUNCTION__, ltid, thread_type_str( thread->type), 
local_cxy );1164 1174 1165 1175 // release lock protecting th_tbl … … 1363 1373 process_t * process; // local pointer on this process 1364 1374 pid_t pid; // this process identifier 1375 xptr_t ref_xp; // reference process for this process 1365 1376 error_t error; // value returned by called functions 1366 1377 char * path; // path to .elf file … … 1370 1381 char ** args_pointers; // array of pointers on main thread arguments 1371 1382 1372 // get thread, process & PID1383 // get thread, process, pid and ref_xp 1373 1384 thread = CURRENT_THREAD; 1374 1385 process = thread->process; 1375 1386 pid = process->pid; 1387 ref_xp = process->ref_xp; 1376 1388 1377 1389 // get relevant infos from exec_info … … 1390 1402 file_xp = XPTR_NULL; 1391 1403 file_id = 0xFFFFFFFF; 1392 error = vfs_open( process ,1404 error = vfs_open( process->vfs_root_xp, 1393 1405 path, 1406 ref_xp, 1394 1407 O_RDONLY, 1395 1408 0, … … 1543 1556 #endif 1544 1557 1545 } // end process_zero_ init()1558 } // end process_zero_create() 1546 1559 1547 1560 //////////////////////////////// … … 1558 1571 1559 1572 #if DEBUG_PROCESS_INIT_CREATE 1573 thread_t * this = CURRENT_THREAD; 1560 1574 uint32_t cycle = (uint32_t)hal_get_cycles(); 1561 1575 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1562 printk("\n[%s] thread %x in process %xenter / cycle %d\n",1563 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );1576 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 1577 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1564 1578 #endif 1565 1579 … … 1571 1585 "no memory for process descriptor in cluster %x\n", local_cxy ); 1572 1586 1587 // set the CWD and VFS_ROOT fields in process descriptor 1588 process->cwd_xp = process_zero.vfs_root_xp; 1589 process->vfs_root_xp = process_zero.vfs_root_xp; 1590 1573 1591 // get PID from local cluster 1574 1592 error = cluster_pid_alloc( process , &pid ); … … 1589 1607 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1590 1608 if( DEBUG_PROCESS_INIT_CREATE < 
cycle ) 1591 printk("\n[%s] thread %x in process %xinitialized process descriptor\n",1592 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );1609 printk("\n[%s] thread[%x,%x] initialized process descriptor\n", 1610 __FUNCTION__, this->process->pid, this->trdid ); 1593 1611 #endif 1594 1612 … … 1596 1614 file_xp = XPTR_NULL; 1597 1615 file_id = -1; 1598 error = vfs_open( process ,1616 error = vfs_open( process->vfs_root_xp, 1599 1617 CONFIG_PROCESS_INIT_PATH, 1618 XPTR( local_cxy , process ), 1600 1619 O_RDONLY, 1601 1620 0, … … 1608 1627 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1609 1628 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1610 printk("\n[%s] thread %x in process %xopen .elf file decriptor\n",1611 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );1629 printk("\n[%s] thread[%x,%x] open .elf file decriptor\n", 1630 __FUNCTION__, this->process->pid, this->trdid ); 1612 1631 #endif 1613 1632 … … 1621 1640 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1622 1641 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1623 printk("\n[%s] thread %x in process %xregistered code/data vsegs in VMM\n",1624 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );1642 printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n", 1643 __FUNCTION__, this->process->pid, this->trdid ); 1625 1644 #endif 1626 1645 … … 1641 1660 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1642 1661 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1643 printk("\n[%s] thread %x in process %xregistered init process in parent\n",1644 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );1662 printk("\n[%s] thread[%x,%x] registered init process in parent\n", 1663 __FUNCTION__, this->process->pid, this->trdid ); 1645 1664 #endif 1646 1665 … … 1668 1687 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1669 1688 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1670 printk("\n[%s] thread %x in process %xcreated main thread\n",1671 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );1689 
printk("\n[%s] thread[%x,%x] created main thread\n", 1690 __FUNCTION__, this->process->pid, this->trdid ); 1672 1691 #endif 1673 1692 … … 1680 1699 cycle = (uint32_t)hal_get_cycles(); 1681 1700 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1682 printk("\n[%s] thread %x in process %xexit / cycle %d\n",1683 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );1701 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 1702 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1684 1703 #endif 1685 1704 … … 1865 1884 1866 1885 #if DEBUG_PROCESS_TXT 1886 thread_t * this = CURRENT_THREAD; 1867 1887 uint32_t cycle = (uint32_t)hal_get_cycles(); 1868 1888 if( DEBUG_PROCESS_TXT < cycle ) 1869 printk("\n[%s] thread %x in process %x attached process %x to TXT %d / cycle %d\n", 1870 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1871 process->pid, txt_id , cycle ); 1889 printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n", 1890 __FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle ); 1872 1891 #endif 1873 1892 … … 1919 1938 1920 1939 #if DEBUG_PROCESS_TXT 1940 thread_t * this = CURRENT_THREAD; 1921 1941 uint32_t cycle = (uint32_t)hal_get_cycles(); 1922 1942 uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) ); 1923 1943 if( DEBUG_PROCESS_TXT < cycle ) 1924 printk("\n[%s] thread %x in process %x detached process %x from TXT %d / cycle %d\n", 1925 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1926 process_pid, txt_id, cycle ); 1944 printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n", 1945 __FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle ); 1927 1946 #endif 1928 1947 … … 1961 1980 1962 1981 #if DEBUG_PROCESS_TXT 1982 thread_t * this = CURRENT_THREAD; 1963 1983 uint32_t cycle = (uint32_t)hal_get_cycles(); 1964 1984 uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) ); 1965 1985 if( DEBUG_PROCESS_TXT 
< cycle ) 1966 printk("\n[%s] thread %x in process %xgive TXT %d to process %x / cycle %d\n",1967 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, txt_id, process_pid, cycle );1986 printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n", 1987 __FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle ); 1968 1988 #endif 1969 1989 … … 1990 2010 1991 2011 #if DEBUG_PROCESS_TXT 1992 uint32_t cycle; 2012 thread_t * this = CURRENT_THREAD; 2013 uint32_t cycle; 1993 2014 #endif 1994 2015 … … 2042 2063 2043 2064 #if DEBUG_PROCESS_TXT 2044 cycle 2065 cycle = (uint32_t)hal_get_cycles(); 2045 2066 uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , ¤t_ptr->pid ) ); 2046 2067 if( DEBUG_PROCESS_TXT < cycle ) 2047 printk("\n[%s] thread %x in process %xrelease TXT %d to KSH %x / cycle %d\n",2048 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, txt_id, ksh_pid, cycle );2068 printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n", 2069 __FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle ); 2049 2070 process_txt_display( txt_id ); 2050 2071 #endif … … 2079 2100 2080 2101 #if DEBUG_PROCESS_TXT 2081 cycle 2102 cycle = (uint32_t)hal_get_cycles(); 2082 2103 uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , ¤t_ptr->pid ) ); 2083 2104 if( DEBUG_PROCESS_TXT < cycle ) 2084 printk("\n[%s] thread %x in process %xrelease TXT %d to process %x / cycle %d\n",2085 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, txt_id, new_pid, cycle );2105 printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n", 2106 __FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle ); 2086 2107 process_txt_display( txt_id ); 2087 2108 #endif … … 2099 2120 cycle = (uint32_t)hal_get_cycles(); 2100 2121 if( DEBUG_PROCESS_TXT < cycle ) 2101 printk("\n[%s] thread %x in process %xrelease TXT %d to nobody / cycle %d\n",2102 __FUNCTION__, CURRENT_THREAD->trdid, 
CURRENT_THREAD->process->pid, txt_id, cycle );2122 printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n", 2123 __FUNCTION__, this->process->pid, this->trdid, txt_id, cycle ); 2103 2124 process_txt_display( txt_id ); 2104 2125 #endif … … 2113 2134 if( DEBUG_PROCESS_TXT < cycle ) 2114 2135 printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n", 2115 __FUNCTION__, CURRENT_THREAD->trdid, process_pid, cycle );2136 __FUNCTION__, this->trdid, process_pid, cycle ); 2116 2137 process_txt_display( txt_id ); 2117 2138 #endif -
trunk/kernel/kern/process.h
r601 r610 125 125 fd_array_t fd_array; /*! embedded open file descriptors array */ 126 126 127 xptr_t vfs_root_xp; /*! extended pointer on current VFS root inode*/127 xptr_t vfs_root_xp; /*! extended pointer on VFS root inode */ 128 128 xptr_t vfs_bin_xp; /*! extended pointer on .elf file descriptor */ 129 129 pid_t pid; /*! process identifier */ … … 132 132 xptr_t parent_xp; /*! extended pointer on parent process */ 133 133 134 xptr_t vfs_cwd_xp;/*! extended pointer on current working dir inode */135 remote_ rwlock_tcwd_lock; /*! lock protecting working directory changes */134 xptr_t cwd_xp; /*! extended pointer on current working dir inode */ 135 remote_busylock_t cwd_lock; /*! lock protecting working directory changes */ 136 136 137 137 xlist_entry_t children_root; /*! root of the children process xlist */ … … 338 338 * It scan the list of local thread, and sets the THREAD_BLOCKED_GLOBAL bit for all threads. 339 339 * It request the relevant schedulers to acknowledge the blocking, using IPI if required, 340 * and returns only when all threads in cluster, but the calling thread, are actually blocked. 340 * when the target thread is running on another core than the calling thread. 341 * It returns only when all threads in cluster, including the caller are actually blocked. 341 342 * The threads are not detached from the scheduler, and not detached from the local process. 342 343 ********************************************************************************************* … … 425 426 426 427 /********************************************************************************************* 427 * This function allocates a free slot in the fd_array of the reference process, 428 * register the <file_xp> argument in the allocated slot, and return the slot index. 
428 * This function allocates a free slot in the fd_array of the reference process 429 * identified by the <process_xp> argument, register the <file_xp> argument in the 430 * allocated slot, and return the slot index in the <fdid> buffer. 429 431 * It can be called by any thread in any cluster, because it uses portable remote access 430 432 * primitives to access the reference process descriptor. 431 433 * It takes the lock protecting the reference fd_array against concurrent accesses. 432 434 ********************************************************************************************* 433 * @ file_xp : extended pointer on the file descriptor to be registered. 434 * @ fdid : [out] buffer for fd_array slot index. 435 * @ process_xp : [in] extended pointer on client reference process. 436 * @ file_xp : [in] extended pointer on the file descriptor to be registered. 437 * @ fdid : [out] buffer for fd_array slot index. 435 438 * @ return 0 if success / return EMFILE if array full. 436 439 ********************************************************************************************/ 437 error_t process_fd_register( process_t * process,440 error_t process_fd_register( xptr_t process_xp, 438 441 xptr_t file_xp, 439 442 uint32_t * fdid ); … … 447 450 * TODO this function is not implemented yet. 448 451 ********************************************************************************************* 449 * @ process : pointer on the local process descriptor.450 * @ fdid : file descriptor index in the fd_array.452 * @ process : [in] pointer on the local process descriptor. 453 * @ fdid : [in] file descriptor index in the fd_array. 451 454 ********************************************************************************************/ 452 455 void process_fd_remove( process_t * process, -
trunk/kernel/kern/rpc.c
r601 r610 75 75 &rpc_kcm_alloc_server, // 22 76 76 &rpc_kcm_free_server, // 23 77 &rpc_ mapper_move_user_server, // 2477 &rpc_undefined, // 24 unused slot 78 78 &rpc_mapper_handle_miss_server, // 25 79 79 &rpc_undefined, // 26 unused slot … … 111 111 "KCM_ALLOC", // 22 112 112 "KCM_FREE", // 23 113 " MAPPER_MOVE_USER",// 24113 "undefined", // 24 114 114 "MAPPER_HANDLE_MISS", // 25 115 115 "undefined", // 26 … … 302 302 uint32_t cycle = (uint32_t)hal_get_cycles(); 303 303 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 304 printk("\n[ DBG] %s :RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n",304 printk("\n[%s] RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n", 305 305 __FUNCTION__, server_ptr->trdid, server_core_lid, cycle ); 306 306 #endif … … 318 318 desc_ptr = GET_PTR( desc_xp ); 319 319 320 index = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) ); 321 blocking = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) ); 320 index = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) ); 321 blocking = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) ); 322 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 322 323 323 324 #if DEBUG_RPC_SERVER_GENERIC … … 325 326 uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) ); 326 327 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 327 printk("\n[ DBG] %s :RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n",328 printk("\n[%s] RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n", 328 329 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle ); 329 330 #endif 331 // register client thread in RPC thread descriptor 332 server_ptr->rpc_client_xp = XPTR( desc_cxy , client_ptr ); 333 330 334 // call the relevant server function 331 335 rpc_server[index]( desc_xp ); … … 334 338 cycle = (uint32_t)hal_get_cycles(); 335 339 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 336 printk("\n[ DBG] %s :RPC thread %x completes rpc %s / client_cxy %x / 
cycle %d\n",340 printk("\n[%s] RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n", 337 341 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle ); 338 342 #endif … … 355 359 cycle = (uint32_t)hal_get_cycles(); 356 360 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 357 printk("\n[ DBG] %s :RPC thread %x unblocked client thread %x / cycle %d\n",361 printk("\n[%s] RPC thread %x unblocked client thread %x / cycle %d\n", 358 362 __FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle ); 359 363 #endif … … 372 376 uint32_t cycle = (uint32_t)hal_get_cycles(); 373 377 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 374 printk("\n[ DBG] %s :RPC thread %x suicides / cycle %d\n",378 printk("\n[%s] RPC thread %x suicides / cycle %d\n", 375 379 __FUNCTION__, server_ptr->trdid, cycle ); 376 380 #endif … … 391 395 uint32_t cycle = (uint32_t)hal_get_cycles(); 392 396 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 393 printk("\n[ DBG] %s :RPC thread %x block IDLE & deschedules / cycle %d\n",397 printk("\n[%s] RPC thread %x block IDLE & deschedules / cycle %d\n", 394 398 __FUNCTION__, server_ptr->trdid, cycle ); 395 399 #endif … … 870 874 uint32_t action = rpc->args[0]; 871 875 pid_t pid = rpc->args[1]; 876 thread_t * this = CURRENT_THREAD; 872 877 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 873 printk("\n[ DBG] %s :enter to request %s of process %x in cluster %x / cycle %d\n",874 __FUNCTION__ , process_action_str( action ) , pid , cxy, cycle );878 printk("\n[%s] thread[%x,%x] enter to request %s of process %x in cluster %x / cycle %d\n", 879 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle ); 875 880 #endif 876 881 … … 885 890 cycle = (uint32_t)hal_get_cycles(); 886 891 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 887 printk("\n[ DBG] %s : exit after requesting to %sprocess %x in cluster %x / cycle %d\n",888 __FUNCTION__ , process_action_str( action ) , pid , cxy, cycle );892 printk("\n[%s] thread[%x,%x] requested %s of process %x in cluster %x / 
cycle %d\n", 893 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle ); 889 894 #endif 890 895 … … 915 920 #if DEBUG_RPC_PROCESS_SIGACTION 916 921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 thread_t * this = CURRENT_THREAD; 917 923 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 918 printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n", 919 __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle ); 924 printk("\n[%s] thread[%x,%x] enter to %s process %x in cluster %x / cycle %d\n", 925 __FUNCTION__, this->process->pid, this->trdid, 926 process_action_str( action ), pid, local_cxy, cycle ); 920 927 #endif 921 928 … … 954 961 cycle = (uint32_t)hal_get_cycles(); 955 962 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 956 printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n", 957 __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle ); 963 printk("\n[%s] thread[%x,%x] exit after %s process %x in cluster %x / cycle %d\n", 964 __FUNCTION__, this->process->pid, this->trdid, 965 process_action_str( action ), pid, local_cxy, cycle ); 958 966 #endif 959 967 … … 966 974 ///////////////////////////////////////////////////// 967 975 void rpc_vfs_inode_create_client( cxy_t cxy, 968 xptr_t dentry_xp, // in969 976 uint32_t fs_type, // in 970 977 uint32_t inode_type, // in … … 993 1000 994 1001 // set input arguments in RPC descriptor 995 rpc.args[0] = (uint64_t)dentry_xp; 996 rpc.args[1] = (uint64_t)fs_type; 997 rpc.args[2] = (uint64_t)inode_type; 998 rpc.args[3] = (uint64_t)attr; 999 rpc.args[4] = (uint64_t)rights; 1000 rpc.args[5] = (uint64_t)uid; 1001 rpc.args[6] = (uint64_t)gid; 1002 rpc.args[0] = (uint64_t)fs_type; 1003 rpc.args[1] = (uint64_t)inode_type; 1004 rpc.args[2] = (uint64_t)attr; 1005 rpc.args[3] = (uint64_t)rights; 1006 rpc.args[4] = (uint64_t)uid; 1007 rpc.args[5] = (uint64_t)gid; 1002 1008 1003 1009 // register RPC request in remote RPC fifo … … 1005 1011 1006 
1012 // get output values from RPC descriptor 1007 *inode_xp = (xptr_t)rpc.args[ 7];1008 *error = (error_t)rpc.args[ 8];1013 *inode_xp = (xptr_t)rpc.args[6]; 1014 *error = (error_t)rpc.args[7]; 1009 1015 1010 1016 #if DEBUG_RPC_VFS_INODE_CREATE … … 1027 1033 #endif 1028 1034 1029 xptr_t dentry_xp;1030 1035 uint32_t fs_type; 1031 1036 uint32_t inode_type; … … 1042 1047 1043 1048 // get input arguments from client rpc descriptor 1044 dentry_xp = (xptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1045 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1046 inode_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1047 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1048 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1049 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); 1050 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[6] ) ); 1049 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1050 inode_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1051 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1052 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1053 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1054 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); 1051 1055 1052 1056 // call local kernel function 1053 error = vfs_inode_create( dentry_xp, 1054 fs_type, 1057 error = vfs_inode_create( fs_type, 1055 1058 inode_type, 1056 1059 attr, … … 1061 1064 1062 1065 // set output arguments 1063 hal_remote_s64( XPTR( client_cxy , &desc->args[ 7] ) , (uint64_t)inode_xp );1064 hal_remote_s64( XPTR( client_cxy , &desc->args[ 8] ) , (uint64_t)error );1066 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)inode_xp ); 1067 hal_remote_s64( XPTR( client_cxy , &desc->args[7] ) , 
(uint64_t)error ); 1065 1068 1066 1069 #if DEBUG_RPC_VFS_INODE_CREATE … … 1149 1152 uint32_t type, // in 1150 1153 char * name, // in 1151 struct vfs_inode_s * parent, // in1152 1154 xptr_t * dentry_xp, // out 1153 1155 error_t * error ) // out … … 1172 1174 rpc.args[0] = (uint64_t)type; 1173 1175 rpc.args[1] = (uint64_t)(intptr_t)name; 1174 rpc.args[2] = (uint64_t)(intptr_t)parent;1175 1176 1176 1177 // register RPC request in remote RPC fifo … … 1178 1179 1179 1180 // get output values from RPC descriptor 1180 *dentry_xp = (xptr_t)rpc.args[ 3];1181 *error = (error_t)rpc.args[ 4];1181 *dentry_xp = (xptr_t)rpc.args[2]; 1182 *error = (error_t)rpc.args[3]; 1182 1183 1183 1184 #if DEBUG_RPC_VFS_DENTRY_CREATE … … 1202 1203 uint32_t type; 1203 1204 char * name; 1204 vfs_inode_t * parent;1205 1205 xptr_t dentry_xp; 1206 1206 error_t error; … … 1212 1212 1213 1213 // get arguments "name", "type", and "parent" from client RPC descriptor 1214 type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1215 name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1216 parent = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1214 type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1215 name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1217 1216 1218 1217 // makes a local copy of name … … 1223 1222 error = vfs_dentry_create( type, 1224 1223 name_copy, 1225 parent,1226 1224 &dentry_xp ); 1227 1225 // set output arguments 1228 hal_remote_s64( XPTR( client_cxy , &desc->args[ 3] ) , (uint64_t)dentry_xp );1229 hal_remote_s64( XPTR( client_cxy , &desc->args[ 4] ) , (uint64_t)error );1226 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)dentry_xp ); 1227 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1230 1228 1231 1229 #if DEBUG_RPC_VFS_DENTRY_CREATE … … 2112 2110 2113 2111 
///////////////////////////////////////////////////////////////////////////////////////// 2114 // [24] Marshaling functions attached to RPC_MAPPER_MOVE_USER 2115 ///////////////////////////////////////////////////////////////////////////////////////// 2116 2117 ///////////////////////////////////////////////// 2118 void rpc_mapper_move_user_client( cxy_t cxy, 2119 mapper_t * mapper, // in 2120 bool_t to_buffer, // in 2121 uint32_t file_offset, // in 2122 void * buffer, // in 2123 uint32_t size, // in 2124 error_t * error ) // out 2125 { 2126 #if DEBUG_RPC_MAPPER_MOVE_USER 2127 uint32_t cycle = (uint32_t)hal_get_cycles(); 2128 if( cycle > DEBUG_RPC_MAPPER_MOVE_USER ) 2129 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2130 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2131 #endif 2132 2133 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 2134 2135 // initialise RPC descriptor header 2136 rpc_desc_t rpc; 2137 rpc.index = RPC_MAPPER_MOVE_USER; 2138 rpc.blocking = true; 2139 rpc.responses = 1; 2140 2141 // set input arguments in RPC descriptor 2142 rpc.args[0] = (uint64_t)(intptr_t)mapper; 2143 rpc.args[1] = (uint64_t)to_buffer; 2144 rpc.args[2] = (uint64_t)file_offset; 2145 rpc.args[3] = (uint64_t)(intptr_t)buffer; 2146 rpc.args[4] = (uint64_t)size; 2147 2148 // register RPC request in remote RPC fifo 2149 rpc_send( cxy , &rpc ); 2150 2151 // get output values from RPC descriptor 2152 *error = (error_t)rpc.args[5]; 2153 2154 #if DEBUG_RPC_MAPPER_MOVE_USER 2155 cycle = (uint32_t)hal_get_cycles(); 2156 if( cycle > DEBUG_RPC_MAPPER_MOVE_USER ) 2157 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2158 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2159 #endif 2160 } 2161 2162 ///////////////////////////////////////////// 2163 void rpc_mapper_move_user_server( xptr_t xp ) 2164 { 2165 #if DEBUG_RPC_MAPPER_MOVE_USER 2166 uint32_t cycle = (uint32_t)hal_get_cycles(); 2167 if( cycle 
> DEBUG_RPC_MAPPER_MOVE_USER ) 2168 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2169 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2170 #endif 2171 2172 mapper_t * mapper; 2173 bool_t to_buffer; 2174 uint32_t file_offset; 2175 void * buffer; 2176 uint32_t size; 2177 error_t error; 2178 2179 // get client cluster identifier and pointer on RPC descriptor 2180 cxy_t client_cxy = GET_CXY( xp ); 2181 rpc_desc_t * desc = GET_PTR( xp ); 2182 2183 // get arguments from client RPC descriptor 2184 mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2185 to_buffer = hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2186 file_offset = hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 2187 buffer = (void *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 2188 size = hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 2189 2190 // call local kernel function 2191 error = mapper_move_user( mapper, 2192 to_buffer, 2193 file_offset, 2194 buffer, 2195 size ); 2196 2197 // set output argument to client RPC descriptor 2198 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 2199 2200 #if DEBUG_RPC_MAPPER_MOVE_USER 2201 cycle = (uint32_t)hal_get_cycles(); 2202 if( cycle > DEBUG_RPC_MAPPER_MOVE_USER ) 2203 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2204 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2205 #endif 2206 } 2112 // [24] undefined slot 2113 ///////////////////////////////////////////////////////////////////////////////////////// 2207 2114 2208 2115 ///////////////////////////////////////////////////////////////////////////////////////// … … 2280 2187 2281 2188 // set output argument to client RPC descriptor 2282 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t) error);2283 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t) page_xp);2189 hal_remote_s64( XPTR( client_cxy , 
&desc->args[2] ) , (uint64_t)page_xp ); 2190 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 2284 2191 2285 2192 #if DEBUG_RPC_MAPPER_HANDLE_MISS -
trunk/kernel/kern/rpc.h
r601 r610 77 77 RPC_VFS_FILE_DESTROY = 15, 78 78 RPC_VFS_FS_CHILD_INIT = 16, 79 RPC_VFS_FS_ REMOVE_DENTRY= 17,80 RPC_VFS_FS_ ADD_DENTRY= 18,79 RPC_VFS_FS_ADD_DENTRY = 17, 80 RPC_VFS_FS_REMOVE_DENTRY = 18, 81 81 RPC_VFS_INODE_LOAD_ALL_PAGES = 19, 82 82 … … 85 85 RPC_KCM_ALLOC = 22, 86 86 RPC_KCM_FREE = 23, 87 RPC_ MAPPER_MOVE_USER= 24,87 RPC_UNDEFINED_24 = 24, 88 88 RPC_MAPPER_HANDLE_MISS = 25, 89 89 RPC_UNDEFINED_26 = 26, … … 307 307 *********************************************************************************** 308 308 * @ cxy : server cluster identifier. 309 * @ dentry_xp : [in] extended pointer on parent dentry.310 309 * @ fs_type : [in] file system type. 311 310 * @ inode_type : [in] file system type. … … 318 317 **********************************************************************************/ 319 318 void rpc_vfs_inode_create_client( cxy_t cxy, 320 xptr_t dentry_xp,321 319 uint32_t fs_type, 322 320 uint32_t inode_type, … … 349 347 * @ type : [in] file system type. 350 348 * @ name : [in] directory entry name. 351 * @ parent : [in] local pointer on parent inode.352 349 * @ dentry_xp : [out] buffer for extended pointer on created dentry. 353 350 * @ error : [out] error status (0 if success). … … 356 353 uint32_t type, 357 354 char * name, 358 struct vfs_inode_s * parent,359 355 xptr_t * dentry_xp, 360 356 error_t * error ); … … 546 542 547 543 /*********************************************************************************** 548 * [24] The RPC_MAPPER_MOVE_USER allows a client thread to require a remote 549 * mapper to move data to/from a distributed user buffer. 550 * It is used by the vfs_move_user() function to move data between a mapper 551 * and an user buffer required by a sys_read() or a sys_write(). 552 *********************************************************************************** 553 * @ cxy : server cluster identifier. 554 * @ mapper : [in] local pointer on mapper. 555 * @ to_buffer : [in] move data from mapper to buffer if non zero. 
556 * @ file_offset : [in] first byte to move in mapper. 557 * @ buffer : [in] user space buffer pointer. 558 * @ size : [in] number of bytes to move. 559 * @ error : [out] error status (0 if success). 560 **********************************************************************************/ 561 void rpc_mapper_move_user_client( cxy_t cxy, 562 struct mapper_s * mapper, 563 bool_t to_buffer, 564 uint32_t file_offset, 565 void * buffer, 566 uint32_t size, 567 error_t * error ); 568 569 void rpc_mapper_move_user_server( xptr_t xp ); 544 * [24] undefined slot 545 **********************************************************************************/ 570 546 571 547 /*********************************************************************************** -
trunk/kernel/kern/scheduler.c
r593 r610 254 254 uint32_t cycle = (uint32_t)hal_get_cycles(); 255 255 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 256 printk("\n[ DBG] %s :thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",256 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n", 257 257 __FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle ); 258 258 #endif … … 266 266 cycle = (uint32_t)hal_get_cycles(); 267 267 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 268 printk("\n[ DBG] %s :process %x in cluster %x deleted / cycle %d\n",268 printk("\n[%s] process %x in cluster %x deleted / cycle %d\n", 269 269 __FUNCTION__ , process->pid , local_cxy , cycle ); 270 270 #endif … … 336 336 uint32_t cycle = (uint32_t)hal_get_cycles(); 337 337 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 338 printk("\n[ DBG] %s :thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",338 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n", 339 339 __FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle ); 340 340 #endif … … 396 396 uint32_t cycle = (uint32_t)hal_get_cycles(); 397 397 if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 398 printk("\n[ DBG] %s :new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",398 printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n", 399 399 __FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle ); 400 400 #endif … … 409 409 uint32_t cycle = (uint32_t)hal_get_cycles(); 410 410 if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 411 printk("\n[ DBG] %s :idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",411 printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n", 412 412 __FUNCTION__, thread->trdid, local_cxy, lid, cycle ); 413 413 #endif … … 540 540 #if DEBUG_SCHED_YIELD 541 541 if( sched->trace ) 542 printk("\n[ DBG] %s :core[%x,%d] / cause = %s\n"542 printk("\n[%s] core[%x,%d] / cause = %s\n" 543 543 " thread %x (%s) (%x,%x) => thread %x (%s) 
(%x,%x) / cycle %d\n", 544 544 __FUNCTION__, local_cxy, lid, cause, … … 557 557 #if (DEBUG_SCHED_YIELD & 1) 558 558 if( sched->trace ) 559 printk("\n[ DBG] %s :core[%x,%d] / cause = %s\n"559 printk("\n[%s] core[%x,%d] / cause = %s\n" 560 560 " thread %x (%s) (%x,%x) continue / cycle %d\n", 561 561 __FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type), … … 687 687 uint32_t blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) ); 688 688 uint32_t flags = hal_remote_l32 ( XPTR( cxy , &thread->flags ) ); 689 process_t * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );689 process_t * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) ); 690 690 pid_t pid = hal_remote_l32 ( XPTR( cxy , &process->pid ) ); 691 691 … … 695 695 char name[16]; 696 696 chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) ); 697 hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , &chdev->name ) );697 hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) ); 698 698 699 699 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n", … … 721 721 uint32_t blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) ); 722 722 uint32_t flags = hal_remote_l32 ( XPTR( cxy , &thread->flags ) ); 723 process_t * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );723 process_t * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) ); 724 724 pid_t pid = hal_remote_l32 ( XPTR( cxy , &process->pid ) ); 725 725 -
trunk/kernel/kern/thread.h
r583 r610 194 194 dma_command_t dma_cmd; /*! DMA device generic command */ 195 195 196 cxy_t rpc_client_cxy; /*! client cluster index (for a RPC thread)*/196 xptr_t rpc_client_xp; /*! client thread (for a RPC thread only) */ 197 197 198 198 list_entry_t wait_list; /*! member of a local waiting queue */ -
trunk/kernel/kernel_config.h
r607 r610 37 37 #define DEBUG_BARRIER 0 38 38 39 #define DEBUG_BUSYLOCK 139 #define DEBUG_BUSYLOCK 0 40 40 #define DEBUG_BUSYLOCK_THREAD_XP 0x0000000000ULL // selected thread xptr 41 41 … … 81 81 #define DEBUG_FATFS_MOVE_PAGE 0 82 82 #define DEBUG_FATFS_RELEASE_INODE 0 83 #define DEBUG_FATFS_REMOVE_DENTRY 183 #define DEBUG_FATFS_REMOVE_DENTRY 0 84 84 #define DEBUG_FATFS_SYNC_FAT 0 85 85 #define DEBUG_FATFS_SYNC_FSINFO 0 … … 90 90 #define DEBUG_HAL_GPT_CREATE 0 91 91 #define DEBUG_HAL_GPT_DESTROY 0 92 92 #define DEBUG_HAL_USPACE 0 93 93 #define DEBUG_HAL_KENTRY 0 94 94 #define DEBUG_HAL_EXCEPTIONS 0 … … 102 102 #define DEBUG_KMEM 0 103 103 104 #define DEBUG_KERNEL_INIT 0105 106 #define DEBUG_MAPPER_GET_PAGE 1104 #define DEBUG_KERNEL_INIT 2 105 106 #define DEBUG_MAPPER_GET_PAGE 0 107 107 #define DEBUG_MAPPER_HANDLE_MISS 0 108 108 #define DEBUG_MAPPER_MOVE_USER 0 … … 116 116 #define DEBUG_PROCESS_COPY_INIT 0 117 117 #define DEBUG_PROCESS_DESTROY 0 118 #define DEBUG_PROCESS_FD_REGISTER 0 118 119 #define DEBUG_PROCESS_GET_LOCAL_COPY 0 119 120 #define DEBUG_PROCESS_INIT_CREATE 0 … … 125 126 #define DEBUG_PROCESS_ZERO_CREATE 0 126 127 127 #define DEBUG_QUEUELOCK 0128 #define DEBUG_QUEUELOCK_TYPE 0 // lock type (0 is undefined) 128 129 129 130 #define DEBUG_RPC_CLIENT_GENERIC 0 … … 132 133 #define DEBUG_RPC_KCM_ALLOC 0 133 134 #define DEBUG_RPC_KCM_FREE 0 134 #define DEBUG_RPC_MAPPER_ rGT_PAGE0135 #define DEBUG_RPC_MAPPER_HANDLE_MISS 0 135 136 #define DEBUG_RPC_MAPPER_MOVE_USER 0 136 137 #define DEBUG_RPC_PMEM_GET_PAGES 0 … … 150 151 #define DEBUG_RPC_VMM_GET_VSEG 0 151 152 152 #define DEBUG_RWLOCK 0153 #define DEBUG_RWLOCK_TYPE 0 // lock type (0 is undefined) 153 154 154 155 #define DEBUG_SCHED_HANDLE_SIGNALS 2 … … 169 170 #define DEBUG_SYS_FORK 0 170 171 #define DEBUG_SYS_GET_CONFIG 0 172 #define DEBUG_SYS_GETCWD 0 171 173 #define DEBUG_SYS_GETPID 0 172 174 #define DEBUG_SYS_ISATTY 0 … … 174 176 #define DEBUG_SYS_KILL 0 175 177 #define DEBUG_SYS_OPEN 0 178 #define 
DEBUG_SYS_MKDIR 2 176 179 #define DEBUG_SYS_MMAP 0 177 180 #define DEBUG_SYS_MUNMAP 0 … … 205 208 #define DEBUG_VFS_ADD_CHILD 0 206 209 #define DEBUG_VFS_CLOSE 0 210 #define DEBUG_VFS_CHDIR 0 207 211 #define DEBUG_VFS_DENTRY_CREATE 0 212 #define DEBUG_VFS_FILE_CREATE 0 213 #define DEBUG_VFS_GET_PATH 0 208 214 #define DEBUG_VFS_INODE_CREATE 0 209 215 #define DEBUG_VFS_INODE_LOAD_ALL 0 210 #define DEBUG_VFS_LOOKUP 0 216 #define DEBUG_VFS_LINK 0 217 #define DEBUG_VFS_LOOKUP 1 211 218 #define DEBUG_VFS_LSEEK 0 219 #define DEBUG_VFS_MKDIR 1 212 220 #define DEBUG_VFS_NEW_CHILD_INIT 0 213 221 #define DEBUG_VFS_OPEN 0 214 222 #define DEBUG_VFS_STAT 0 215 #define DEBUG_VFS_UNLINK 1223 #define DEBUG_VFS_UNLINK 0 216 224 217 225 #define DEBUG_VMM_CREATE_VSEG 0 … … 247 255 248 256 #define LOCK_THREAD_JOIN 10 // remote (B) protect join/exit between two threads 249 #define LOCK_ VFS_MAIN 11 // remote (B) protect vfs traversal (one per inode)257 #define LOCK_XHTAB_STATE 11 // remote (B) protect a distributed xhtab state 250 258 #define LOCK_CHDEV_QUEUE 12 // remote (B) protect chdev threads waiting queue 251 259 #define LOCK_CHDEV_TXT0 13 // remote (B) protect access to kernel terminal TXT0 … … 255 263 #define LOCK_CONDVAR_STATE 17 // remote (B) protect user condvar state 256 264 #define LOCK_SEM_STATE 18 // remote (B) protect user semaphore state 257 #define LOCK_ XHTAB_STATE 19 // remote (B) protect a distributed xhatb state265 #define LOCK_PROCESS_CWD 19 // remote (B) protect current working directory in process 258 266 259 267 #define BUSYLOCK_TYPE_MAX 20 … … 272 280 273 281 #define LOCK_MAPPER_STATE 30 // remote (RW) protect mapper state 274 #define LOCK_ PROCESS_CWD 31 // remote (RW) protect current working directory in process275 #define LOCK_VFS_ INODE 32 // remote (RW) protect inode state and associated mapper276 #define LOCK_V FS_FILE 33 // remote (RW) protect file descriptor state277 #define LOCK_VMM_ VSL 34 // remote (RW) protect VSL (local list of vsegs)278 #define 
LOCK_V MM_GPT 35 // remote (RW) protect GPT (local page table)282 #define LOCK_VFS_SIZE 31 // remote (RW) protect inode state and associated mapper 283 #define LOCK_VFS_FILE 32 // remote (RW) protect file descriptor state 284 #define LOCK_VMM_VSL 33 // remote (RW) protect VSL (local list of vsegs) 285 #define LOCK_VMM_GPT 34 // remote (RW) protect GPT (local page table) 286 #define LOCK_VFS_MAIN 35 // remote (RW) protect vfs traversal (in root inode) 279 287 280 288 -
trunk/kernel/libk/grdxt.c
r603 r610 133 133 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 134 134 135 printk("\n***** Generic Radix Tree for <%s> : %d / %d / %d\n", 136 name, 1<<w1 , 1<<w2 , 1<<w3 ); 135 printk("\n***** Generic Radix Tree for <%s>\n", name ); 137 136 138 137 for( ix1=0 ; ix1 < (uint32_t)(1<<w1) ; ix1++ ) … … 327 326 328 327 // get ptr1 329 void 328 void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 330 329 331 330 // get ptr2 332 void 331 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 333 332 if( ptr2 == NULL ) return XPTR_NULL; 334 333 335 334 // get ptr3 336 void 335 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 337 336 if( ptr3 == NULL ) return XPTR_NULL; 338 337 339 // get value 340 xptr_t value = XPTR( rt_cxy , ptr3[ix3] ); 341 342 return value; 338 // get pointer on registered item 339 void * item_ptr = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) ); 340 341 // return extended pointer on registered item 342 if ( item_ptr == NULL ) return XPTR_NULL; 343 else return XPTR( rt_cxy , item_ptr ); 343 344 344 345 } // end grdxt_remote_lookup() -
trunk/kernel/libk/grdxt.h
r603 r610 40 40 * - Lookup can be done by a thread running in any cluster (local or remote). 41 41 ****************************************************************************************** 42 * It is used by the mapper implementing the file cache:42 * When it is used by the mapper implementing the file cache: 43 43 * - the key is the page index in file. 44 44 * - the registered value is a local pointer on the page descriptor. -
trunk/kernel/libk/htab.c
r563 r610 34 34 /////////////////////////////////////////////////////////////////////////////////////////// 35 35 // Item type specific (static) functions (two functions for each item type). 36 // Example below if for <vhs_inode_t>, where the identifier is the inum field. 37 /////////////////////////////////////////////////////////////////////////////////////////// 38 39 /////////////////////////////////////////////////////////////////////////////////////////// 40 // These static functions compute the hash index from the key. 36 // Example below if for <bloup_t> type, where the identifier is an uint32_t field. 37 /////////////////////////////////////////////////////////////////////////////////////////// 38 39 typedef struct bloup_s 40 { 41 uint32_t key; 42 list_entry_t list; 43 } 44 bloup_t; 45 46 /////////////////////////////////////////////////////////////////////////////////////////// 47 // This static function computes the hash index from the key. 41 48 /////////////////////////////////////////////////////////////////////////////////////////// 42 49 // @ key : local pointer on key. 43 50 // @ return the index value, from 0 to (HASHTAB_SIZE - 1) 44 51 /////////////////////////////////////////////////////////////////////////////////////////// 45 static uint32_t htab_inode_index( void * key ) 46 { 47 uint32_t * inum = key; 48 return (((*inum) >> 16) ^ ((*inum) & 0xFFFF)) % HASHTAB_SIZE; 52 static uint32_t htab_bloup_index( void * key ) 53 { 54 return (*(uint32_t *)key) % HASHTAB_SIZE; 49 55 } 50 56 51 57 /////////////////////////////////////////////////////////////////////////////////////// 52 // Th ese static functions areused by htab_lookup(), htab_insert(), and htab_remove().58 // This static function is used by htab_lookup(), htab_insert(), and htab_remove(). 53 59 // They scan one sub-list identified by <index> to find an item identified by <key>. 54 60 // The sub-list is not modified, but the lock must have been taken by the caller. 
… … 59 65 // @ return pointer on item if found / return NULL if not found. 60 66 /////////////////////////////////////////////////////////////////////////////////////// 61 static void * htab_ inode_scan( htab_t * htab,67 static void * htab_bloup_scan( htab_t * htab, 62 68 uint32_t index, 63 69 void * key ) 64 70 { 65 71 list_entry_t * list_entry; // pointer on list_entry_t (iterator) 66 vfs_inode_t * inode; // pointer on item72 bloup_t * bloup; // pointer on item 67 73 68 74 LIST_FOREACH( &htab->roots[index] , list_entry ) 69 75 { 70 inode = (vfs_inode_t *)LIST_ELEMENT( list_entry , vfs_inode_t , list );71 if( inode->inum == *(uint32_t *)key ) return inode;76 bloup = (bloup_t *)LIST_ELEMENT( list_entry , bloup_t , list ); 77 if( bloup->key == *(uint32_t *)key ) return bloup; 72 78 } 73 79 … … 91 97 htab->items = 0; 92 98 93 if( type == HTAB_ INODE_TYPE )94 { 95 htab->scan = &htab_ inode_scan;96 htab->index = &htab_ inode_index;99 if( type == HTAB_BLOUP_TYPE ) 100 { 101 htab->scan = &htab_bloup_scan; 102 htab->index = &htab_bloup_index; 97 103 } 98 104 else -
trunk/kernel/libk/htab.h
r563 r610 31 31 32 32 ///////////////////////////////////////////////////////////////////////////////////////// 33 // This file define a generic, embedded, hash table.33 // This file define a generic, embedded, local hash table. 34 34 // 35 35 // It can only be accessed by threads running in the local cluster. … … 70 70 typedef enum 71 71 { 72 HTAB_ INODE_TYPE = 1, /*! item is a vfs_inode_t*/72 HTAB_BLOUP_TYPE = 1, /*! item is a bloup_t */ 73 73 } 74 74 htab_item_type_t; -
trunk/kernel/libk/list.h
r457 r610 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016 )5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 23 23 */ 24 24 25 #ifndef _ ALMOS_LIST_H_26 #define _ ALMOS_LIST_H_25 #ifndef _LIST_H_ 26 #define _LIST_H_ 27 27 28 28 #include <kernel_config.h> 29 29 #include <hal_kernel_types.h> 30 #include <printk.h> 30 31 31 32 #ifndef NULL … … 240 241 } 241 242 242 243 #endif /* _ALMOS_LIST_H_ */ 243 /*************************************************************************** 244 * This debug function displays all entries of a list. 245 * @ root : local pointer on the root list_entry_t. 246 * @ string : list identifier displayed in header. 247 * @ max : max number of éléments to display. 248 **************************************************************************/ 249 static inline void list_display( list_entry_t * root, 250 char * string, 251 uint32_t max ) 252 { 253 list_entry_t * iter; 254 list_entry_t * next; 255 list_entry_t * pred; 256 uint32_t index; 257 258 next = root->next; 259 pred = root->pred; 260 261 printk("\n***** root (%x) / next (%x) / pred (%x) / %s *****\n", 262 root, next, pred, string ); 263 264 if( list_is_empty( root ) == false ) 265 { 266 for( iter = next , index = 0 ; 267 (iter != root) && (index < max) ; 268 iter = next , index++ ) 269 { 270 next = iter->next; 271 pred = iter->pred; 272 273 printk(" - %d : iter (%x) / next (%x) / pred (%x)\n", 274 index, iter, next, pred ); 275 } 276 } 277 } // end list_display() 278 279 280 #endif /* _LIST_H_ */ -
trunk/kernel/libk/queuelock.c
r603 r610 45 45 busylock_init( &lock->lock , type ); 46 46 47 #if DEBUG_QUEUELOCK 47 #if DEBUG_QUEUELOCK_TYPE 48 48 thread_t * this = CURRENT_THREAD; 49 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())49 if( DEBUG_QUEUELOCK_TYPE == type ) 50 50 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n", 51 51 __FUNCTION__, this->process->pid, this->trdid, … … 70 70 { 71 71 72 #if DEBUG_QUEUELOCK 73 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles() ) 72 #if DEBUG_QUEUELOCK_TYPE 73 uint32_t lock_type = lock->lock.type; 74 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 74 75 printk("\n[%s ] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n", 75 76 __FUNCTION__, this->process->pid, this->trdid, 76 lock_type_str[lock ->lock.type], local_cxy, lock );77 lock_type_str[lock_type], local_cxy, lock ); 77 78 #endif 78 79 // get pointer on calling thread … … 95 96 } 96 97 97 #if DEBUG_QUEUELOCK 98 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())98 #if DEBUG_QUEUELOCK_TYPE 99 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 99 100 printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n", 100 101 __FUNCTION__, this->process->pid, this->trdid, 101 lock_type_str[lock ->lock.type], local_cxy, lock );102 lock_type_str[lock_type], local_cxy, lock ); 102 103 #endif 103 104 … … 119 120 busylock_acquire( &lock->lock ); 120 121 121 #if DEBUG_QUEUELOCK 122 thread_t * this = CURRENT_THREAD; 123 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles() ) 122 #if DEBUG_QUEUELOCK_TYPE 123 uint32_t lock_type = lock->lock.type; 124 thread_t * this = CURRENT_THREAD; 125 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 124 126 printk("\n[%s] thread[%x,%x] RELEASE q_lock %s [%x,%x]\n", 125 127 __FUNCTION__, this->process->pid, this->trdid, 126 lock_type_str[lock ->lock.type], local_cxy, lock );128 lock_type_str[lock_type], local_cxy, lock ); 127 129 #endif 128 130 … … 136 138 thread_t * thread = LIST_FIRST( &lock->root , thread_t , wait_list ); 137 139 138 #if DEBUG_QUEUELOCK 139 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())140 #if 
DEBUG_QUEUELOCK_TYPE 141 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 140 142 printk("\n[%s] thread[%x,%x] UNBLOCK thread [%x,%x] / q_lock %s [%x,%x]\n", 141 143 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, 142 lock_type_str[lock ->lock.type], local_cxy, lock );144 lock_type_str[lock_type], local_cxy, lock ); 143 145 #endif 144 146 // remove this waiting thread from waiting list -
trunk/kernel/libk/remote_queuelock.c
r603 r610 54 54 remote_busylock_init( XPTR( lock_cxy , &lock_ptr->lock ) , type ); 55 55 56 #if DEBUG_QUEUELOCK 56 #if DEBUG_QUEUELOCK_TYPE 57 57 thread_t * this = CURRENT_THREAD; 58 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())58 if( DEBUG_QUEUELOCK_TYPE == type ) 59 59 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n", 60 60 __FUNCTION__, this->process->pid, this->trdid, … … 76 76 remote_queuelock_t * lock_ptr = GET_PTR( lock_xp ); 77 77 78 #if DEBUG_QUEUELOCK 78 #if DEBUG_QUEUELOCK_TYPE 79 79 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 80 80 #endif … … 90 90 { 91 91 92 #if DEBUG_QUEUELOCK 93 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())92 #if DEBUG_QUEUELOCK_TYPE 93 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 94 94 printk("\n[%s] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n", 95 95 __FUNCTION__, this->process->pid, this->trdid, … … 116 116 } 117 117 118 #if DEBUG_QUEUELOCK 119 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())118 #if DEBUG_QUEUELOCK_TYPE 119 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 120 120 printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n", 121 121 __FUNCTION__, this->process->pid, this->trdid, … … 128 128 // release busylock 129 129 remote_busylock_release( busylock_xp ); 130 131 hal_fence(); 130 132 131 133 } // end remote_queuelock_acquire() … … 147 149 remote_busylock_acquire( busylock_xp ); 148 150 149 #if DEBUG_QUEUELOCK 151 #if DEBUG_QUEUELOCK_TYPE 150 152 thread_t * this = CURRENT_THREAD; 151 153 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 152 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())154 if( DEBUG_QUEUELOCK_TYPE == lock_type ) 153 155 printk("\n[%s] thread[%x,%x] RELEASE q_lock %s (%x,%x)\n", 154 156 __FUNCTION__, this->process->pid, this->trdid, … … 168 170 thread_t * thread_ptr = GET_PTR( thread_xp ); 169 171 170 #if DEBUG_QUEUELOCK 171 if( DEBUG_QUEUELOCK < (uint32_t)hal_get_cycles())172 #if DEBUG_QUEUELOCK_TYPE 173 if( DEBUG_QUEUELOCK_TYPE 
== lock_type ) 172 174 { 173 175 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); -
trunk/kernel/libk/remote_rwlock.c
r603 r610 53 53 remote_busylock_init( XPTR( lock_cxy , &lock_ptr->lock ) , type ); 54 54 55 #if DEBUG_RWLOCK 55 #if DEBUG_RWLOCK_TYPE 56 56 thread_t * this = CURRENT_THREAD; 57 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())57 if( type == DEBUG_RWLOCK_TYPE ) 58 58 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n", 59 59 __FUNCTION__, this->process->pid, this->trdid, … … 75 75 cxy_t lock_cxy = GET_CXY( lock_xp ); 76 76 77 #if DEBUG_RWLOCK 77 #if DEBUG_RWLOCK_TYPE 78 78 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 79 79 #endif … … 92 92 { 93 93 94 #if DEBUG_RWLOCK 95 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())94 #if DEBUG_RWLOCK_TYPE 95 if( lock_type == DEBUG_RWLOCK_TYPE ) 96 96 printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 97 97 __FUNCTION__, this->process->pid, this->trdid, … … 123 123 hal_fence(); 124 124 125 #if DEBUG_RWLOCK 126 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())125 #if DEBUG_RWLOCK_TYPE 126 if( lock_type == DEBUG_RWLOCK_TYPE ) 127 127 printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n", 128 128 __FUNCTION__, this->process->pid, this->trdid, … … 148 148 cxy_t lock_cxy = GET_CXY( lock_xp ); 149 149 150 #if DEBUG_RWLOCK 150 #if DEBUG_RWLOCK_TYPE 151 151 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 152 152 #endif … … 165 165 { 166 166 167 #if DEBUG_RWLOCK 168 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())167 #if DEBUG_RWLOCK_TYPE 168 if( lock_type == DEBUG_RWLOCK_TYPE ) 169 169 printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 170 170 __FUNCTION__, this->process->pid, this->trdid, … … 195 195 hal_remote_s32( taken_xp , 1 ); 196 196 197 #if DEBUG_RWLOCK 198 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())197 #if DEBUG_RWLOCK_TYPE 198 if( lock_type == DEBUG_RWLOCK_TYPE ) 199 199 printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 
200 200 __FUNCTION__, this->process->pid, this->trdid, … … 231 231 hal_remote_atomic_add( count_xp , -1 ); 232 232 233 #if DEBUG_RWLOCK 233 #if DEBUG_RWLOCK_TYPE 234 234 thread_t * this = CURRENT_THREAD; 235 235 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 236 236 xptr_t taken_xp = XPTR( lock_cxy , &lock_ptr->taken ); 237 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())237 if( lock_type == DEBUG_RWLOCK_TYPE ) 238 238 printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 239 239 __FUNCTION__, this->process->pid, this->trdid, … … 257 257 thread_unblock( thread_xp , THREAD_BLOCKED_LOCK ); 258 258 259 #if DEBUG_RWLOCK 260 if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK)259 #if DEBUG_RWLOCK_TYPE 260 if( lock_type == DEBUG_RWLOCK_TYPE ) 261 261 { 262 262 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 288 288 thread_unblock( thread_xp , THREAD_BLOCKED_LOCK ); 289 289 290 #if DEBUG_RWLOCK 291 if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK)290 #if DEBUG_RWLOCK_TYPE 291 if( lock_type == DEBUG_RWLOCK_TYPE ) 292 292 { 293 293 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); … … 330 330 hal_remote_s32( taken_xp , 0 ); 331 331 332 #if DEBUG_RWLOCK 332 #if DEBUG_RWLOCK_TYPE 333 333 thread_t * this = CURRENT_THREAD; 334 334 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) ); 335 335 xptr_t count_xp = XPTR( lock_cxy , &lock_ptr->count ); 336 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())336 if( lock_type == DEBUG_RWLOCK_TYPE ) 337 337 printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 338 338 __FUNCTION__, this->process->pid, this->trdid, … … 355 355 thread_unblock( thread_xp , THREAD_BLOCKED_LOCK ); 356 356 357 #if DEBUG_RWLOCK 358 if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK)357 #if DEBUG_RWLOCK_TYPE 358 if( lock_type == DEBUG_RWLOCK_TYPE ) 359 359 { 360 360 trdid_t trdid = hal_remote_l32( XPTR( 
thread_cxy , &thread_ptr->trdid ) ); … … 385 385 thread_unblock( thread_xp , THREAD_BLOCKED_LOCK ); 386 386 387 #if DEBUG_RWLOCK 388 if( (uint32_t)hal_get_cycles() > DEBUG_RWLOCK)387 #if DEBUG_RWLOCK_TYPE 388 if( lock_type == DEBUG_RWLOCK_TYPE ) 389 389 { 390 390 trdid_t trdid = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) ); -
trunk/kernel/libk/rwlock.c
r603 r610 50 50 busylock_init( &lock->lock , type ); 51 51 52 #if DEBUG_RWLOCK 52 #if DEBUG_RWLOCK_TYPE 53 53 thread_t * this = CURRENT_THREAD; 54 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())54 if( DEBUG_RWLOCK_TYPE == type ) 55 55 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n", 56 56 __FUNCTION__, this->process->pid, this->trdid, … … 75 75 { 76 76 77 #if DEBUG_RWLOCK 78 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() ) 77 #if DEBUG_RWLOCK_TYPE 78 uint32_t lock_type = lock->lock.type; 79 if( DEBUG_RWLOCK_TYPE == lock_type ) 79 80 printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 80 81 __FUNCTION__, this->process->pid, this->trdid, 81 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );82 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 82 83 #endif 83 84 // register reader thread in waiting queue … … 100 101 lock->count++; 101 102 102 #if DEBUG_RWLOCK 103 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())103 #if DEBUG_RWLOCK_TYPE 104 if( DEBUG_RWLOCK_TYPE == lock_type ) 104 105 printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 105 106 __FUNCTION__, this->process->pid, this->trdid, 106 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );107 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 107 108 #endif 108 109 … … 127 128 { 128 129 129 #if DEBUG_RWLOCK 130 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() ) 130 #if DEBUG_RWLOCK_TYPE 131 uint32_t lock_type = lock->lock.type; 132 if( DEBUG_RWLOCK_TYPE == lock_type ) 131 133 printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n", 132 134 __FUNCTION__, this->process->pid, this->trdid, 133 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );135 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 134 136 #endif 135 137 // register writer in waiting queue … … 152 154 lock->taken 
= 1; 153 155 154 #if DEBUG_RWLOCK 155 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())156 #if DEBUG_RWLOCK_TYPE 157 if( DEBUG_RWLOCK_TYPE == lock_type ) 156 158 printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n", 157 159 __FUNCTION__, this->process->pid, this->trdid, 158 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );160 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 159 161 #endif 160 162 … … 176 178 lock->count--; 177 179 178 #if DEBUG_RWLOCK 180 #if DEBUG_RWLOCK_TYPE 179 181 thread_t * this = CURRENT_THREAD; 180 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() ) 182 uint32_t lock_type = lock->lock.type; 183 if( DEBUG_RWLOCK_TYPE == lock_type ) 181 184 printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 182 185 __FUNCTION__, this->process->pid, this->trdid, 183 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );186 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 184 187 #endif 185 188 … … 191 194 thread_t * thread = LIST_FIRST( &lock->wr_root , thread_t , wait_list ); 192 195 193 #if DEBUG_RWLOCK 194 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())196 #if DEBUG_RWLOCK_TYPE 197 if( DEBUG_RWLOCK_TYPE == lock_type ) 195 198 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 196 199 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, 197 lock_type_str[lock ->lock.type], local_cxy, lock );200 lock_type_str[lock_type], local_cxy, lock ); 198 201 #endif 199 202 … … 213 216 thread_t * thread = LIST_FIRST( &lock->wr_root , thread_t , wait_list ); 214 217 215 #if DEBUG_RWLOCK 216 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())218 #if DEBUG_RWLOCK_TYPE 219 if( DEBUG_RWLOCK_TYPE == lock_type ) 217 220 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 218 221 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, 
thread->trdid, 219 lock_type_str[lock ->lock.type], local_cxy, lock );222 lock_type_str[lock_type], local_cxy, lock ); 220 223 #endif 221 224 … … 245 248 lock->taken = 0; 246 249 247 #if DEBUG_RWLOCK 250 #if DEBUG_RWLOCK_TYPE 248 251 thread_t * this = CURRENT_THREAD; 249 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles() ) 252 uint32_t lock_type = lock->lock.type; 253 if( DEBUG_RWLOCK_TYPE == lock_type ) 250 254 printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n", 251 255 __FUNCTION__, this->process->pid, this->trdid, 252 lock_type_str[lock ->lock.type], local_cxy, lock, lock->taken, lock->count );256 lock_type_str[lock_type], local_cxy, lock, lock->taken, lock->count ); 253 257 #endif 254 258 … … 259 263 thread_t * thread = LIST_FIRST( &lock->wr_root , thread_t , wait_list ); 260 264 261 #if DEBUG_RWLOCK 262 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())265 #if DEBUG_RWLOCK_TYPE 266 if( DEBUG_RWLOCK_TYPE == lock_type ) 263 267 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 264 268 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, 265 lock_type_str[lock ->lock.type], local_cxy, lock );269 lock_type_str[lock_type], local_cxy, lock ); 266 270 #endif 267 271 // remove this waiting thread from waiting list … … 280 284 thread_t * thread = LIST_FIRST( &lock->rd_root , thread_t , wait_list ); 281 285 282 #if DEBUG_RWLOCK 283 if( DEBUG_RWLOCK < (uint32_t)hal_get_cycles())286 #if DEBUG_RWLOCK_TYPE 287 if( DEBUG_RWLOCK_TYPE == lock_type ) 284 288 printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n", 285 289 __FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid, 286 lock_type_str[lock ->lock.type], local_cxy, lock );290 lock_type_str[lock_type], local_cxy, lock ); 287 291 #endif 288 292 // remove this waiting thread from waiting list -
trunk/kernel/libk/xhtab.c
r603 r610 40 40 41 41 /////////////////////////////////////////////////////////////////////////////////////////// 42 // vfs_dentry_t42 // XHTAB_DENTRY_TYPE 43 43 // This functions compute the hash index from the key, that is the directory entry name. 44 44 /////////////////////////////////////////////////////////////////////////////////////////// … … 58 58 59 59 /////////////////////////////////////////////////////////////////////////////////////////// 60 // vfs_dentry_t60 // XHTAB_DENTRY_TYPE 61 61 // This functions returns the extended pointer on the item, from the extended pointer 62 62 // on xlist contained in the item. … … 67 67 static xptr_t xhtab_dentry_item_from_xlist( xptr_t xlist_xp ) 68 68 { 69 return XLIST_ELEMENT( xlist_xp , vfs_dentry_t , list);69 return XLIST_ELEMENT( xlist_xp , vfs_dentry_t , children ); 70 70 } 71 71 72 72 //////////////////////////////////////////////////////////////////////////////////////////// 73 // vfs_dentry_t73 // XHTAB_DENTRY_TYPE 74 74 // This function compares the identifier of an item to a given <key>. 75 75 // it returns true when the directory name matches the name pointed by the <key> argument. … … 96 96 97 97 //////////////////////////////////////////////////////////////////////////////////////////// 98 // vfs_dentry_t98 // XHTAB_DENTRY_TYPE 99 99 // This function print the item key, that is the name for a vfs_dentry_t. 100 100 //////////////////////////////////////////////////////////////////////////////////////////// … … 150 150 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 151 151 } 152 153 #if DEBUG_XHTAB 154 printk("\n@@@ %s for xhtab (%x,%x)\n" 155 " - index_from_key = %x (@ %x)\n" 156 " - item_match_key = %x (@ %x)\n" 157 " - item_from_xlist = %x (@ %x)\n", 158 __FUNCTION__, local_cxy, xhtab, 159 xhtab->index_from_key , &xhtab->index_from_key, 160 xhtab->item_match_key , &xhtab->item_match_key, 161 xhtab->item_from_xlist, &xhtab->item_from_xlist ); 162 #endif 152 163 153 164 } // end xhtab_init() -
trunk/kernel/libk/xhtab.h
r603 r610 73 73 /****************************************************************************************** 74 74 * This define the supported item types. 75 * - The XHTAB_DENTRY_TYPE is used to implement the set of directory entries for a 76 * directory inode : the "children" inode field is an embedded xhtab. 75 77 *****************************************************************************************/ 76 78 -
trunk/kernel/libk/xlist.h
r603 r610 5 5 * xlist.h - Double Circular Linked lists, using extended pointers. 6 6 * 7 * Author : Alain Greiner (2016 )7 * Author : Alain Greiner (2016,2017,2018) 8 8 * 9 9 * Copyright (c) UPMC Sorbonne Universites … … 31 31 #include <hal_kernel_types.h> 32 32 #include <hal_remote.h> 33 #include <printk.h> 33 34 34 35 /**** global variables ***/ … … 190 191 * double linked list. Four extended pointers must be modified. 191 192 * The lock protecting the list should have been previously taken. 192 * @ root : extended pointer on the root xlist_entry_t193 * @ entry : extended pointer on the xlist_entry_t to be inserted194 **************************************************************************/ 195 static inline void xlist_add_first( xptr_t root ,196 xptr_t entry )193 * @ root_xp : extended pointer on the root xlist_entry_t 194 * @ entry_xp : extended pointer on the xlist_entry_t to be inserted 195 **************************************************************************/ 196 static inline void xlist_add_first( xptr_t root_xp, 197 xptr_t entry_xp ) 197 198 { 198 199 // get the extended pointer on the first element in list 199 xptr_t first = (xptr_t)hal_remote_l64( root);200 201 // update root .next <= entry202 hal_remote_s64( root , (uint64_t)entry);203 204 // update entry .next <= first205 hal_remote_s64( entry , (uint64_t)first);206 207 // entry.pred <= root208 hal_remote_s64( entry + sizeof(xptr_t) , (uint64_t)root);200 xptr_t first_xp = hal_remote_l64( root_xp ); 201 202 // update root_xp->next <= entry_xp 203 hal_remote_s64( root_xp , entry_xp ); 204 205 // update entry_xp->next <= first_xp 206 hal_remote_s64( entry_xp , first_xp ); 207 208 // update entry_xp->pred <= root_xp 209 hal_remote_s64( entry_xp + sizeof(xptr_t) , root_xp ); 209 210 210 // first.pred <= new211 hal_remote_s64( first + sizeof(xptr_t) , (uint64_t)entry);211 // update first_xp->pred <= entry_xp 212 hal_remote_s64( first_xp + sizeof(xptr_t) , entry_xp ); 212 213 } 213 214 … … 216 217 * 
double linked list. Four extended pointers must be modified. 217 218 * The lock protecting the list should have been previously taken. 218 * @ root : extended pointer on the root xlist_entry_t219 * @ entry : extended pointer on the xlist_entry_t to be inserted220 **************************************************************************/ 221 static inline void xlist_add_last( xptr_t root ,222 xptr_t entry )219 * @ root_xp : extended pointer on the root xlist_entry_t 220 * @ entry_xp : extended pointer on the xlist_entry_t to be inserted 221 **************************************************************************/ 222 static inline void xlist_add_last( xptr_t root_xp, 223 xptr_t entry_xp ) 223 224 { 224 225 // get the extended pointer on the last element in list 225 xptr_t last = (xptr_t)hal_remote_l64( root+ sizeof(xptr_t) );226 227 // update root .pred <= entry228 hal_remote_s64( root + sizeof(xptr_t) , (uint64_t)entry);229 230 // update entry .pred <= last231 hal_remote_s64( entry + sizeof(xptr_t) , (uint64_t)last);232 233 // entry.next <= root234 hal_remote_s64( entry , (uint64_t)root);226 xptr_t last_xp = hal_remote_l64( root_xp + sizeof(xptr_t) ); 227 228 // update root_xp->pred <= entry_xp 229 hal_remote_s64( root_xp + sizeof(xptr_t) , entry_xp ); 230 231 // update entry_xp->pred <= last_xp 232 hal_remote_s64( entry_xp + sizeof(xptr_t) , last_xp ); 233 234 // update entry_xp->next <= root_xp 235 hal_remote_s64( entry_xp , root_xp ); 235 236 236 // last.next <= entry237 hal_remote_s64( last , (uint64_t)entry);237 // update last_xp->next <= entry_xp 238 hal_remote_s64( last_xp , entry_xp ); 238 239 } 239 240 … … 241 242 /*************************************************************************** 242 243 * This function returns true if the list is empty. 
243 * @ root : extended pointer on the root xlist_entry_t244 **************************************************************************/ 245 static inline bool_t xlist_is_empty( xptr_t root )244 * @ root_xp : extended pointer on the root xlist_entry_t. 245 **************************************************************************/ 246 static inline bool_t xlist_is_empty( xptr_t root_xp ) 246 247 { 247 248 // get the extended pointer root.next value 248 xptr_t next = (xptr_t)hal_remote_l64( root );249 250 return ( root == next );249 xptr_t next = (xptr_t)hal_remote_l64( root_xp ); 250 251 return ( root_xp == next ); 251 252 } 252 253 … … 279 280 * Four extended pointers must be modified. 280 281 * The memory allocated to the removed entry is not released. 281 * @ old : extended pointer on the xlist_entry_t to be removed.282 * @ new : extended pointer on the xlist_entry_t to be inserted.282 * @ old : extended pointer on the xlist_entry_t to be removed. 283 * @ new : extended pointer on the xlist_entry_t to be inserted. 283 284 **************************************************************************/ 284 285 static inline void xlist_replace( xptr_t old, … … 307 308 } 308 309 310 /*************************************************************************** 311 * This debug function displays all entries of an xlist. 312 * @ root_xp : extended pointer on the root xlist_entry_t. 313 * @ string : list identifier displayed in header. 314 * @ max : max number of éléments to display. 
315 **************************************************************************/ 316 static inline void xlist_display( xptr_t root_xp, 317 char * string, 318 uint32_t max ) 319 { 320 cxy_t root_cxy; 321 xlist_entry_t * root_ptr; 322 323 xptr_t iter_xp; 324 cxy_t iter_cxy; 325 xlist_entry_t * iter_ptr; 326 327 xptr_t next_xp; 328 cxy_t next_cxy; 329 xlist_entry_t * next_ptr; 330 331 xptr_t pred_xp; 332 cxy_t pred_cxy; 333 xlist_entry_t * pred_ptr; 334 335 uint32_t index; 336 337 root_cxy = GET_CXY( root_xp ); 338 root_ptr = GET_PTR( root_xp ); 339 340 next_xp = hal_remote_l64( XPTR( root_cxy , &root_ptr->next ) ); 341 next_cxy = GET_CXY( next_xp ); 342 next_ptr = GET_PTR( next_xp ); 343 344 pred_xp = hal_remote_l64( XPTR( root_cxy , &root_ptr->pred ) ); 345 pred_cxy = GET_CXY( pred_xp ); 346 pred_ptr = GET_PTR( pred_xp ); 347 348 printk("\n***** root (%x,%x) / next (%x,%x) / pred (%x,%x) / %s *****\n", 349 root_cxy, root_ptr, next_cxy, next_ptr, pred_cxy, pred_ptr, string ); 350 351 if( xlist_is_empty( root_xp ) == false ) 352 { 353 for( iter_xp = hal_remote_l64( XPTR( root_cxy , &root_ptr->next) ) , index = 0 ; 354 (iter_xp != root_xp) && (index < max) ; 355 iter_xp = next_xp , index++ ) 356 { 357 iter_cxy = GET_CXY( iter_xp ); 358 iter_ptr = GET_PTR( iter_xp ); 359 360 next_xp = hal_remote_l64( XPTR( iter_cxy , &iter_ptr->next ) ); 361 next_cxy = GET_CXY( next_xp ); 362 next_ptr = GET_PTR( next_xp ); 363 364 pred_xp = hal_remote_l64( XPTR( iter_cxy , &iter_ptr->pred ) ); 365 pred_cxy = GET_CXY( pred_xp ); 366 pred_ptr = GET_PTR( pred_xp ); 367 368 printk(" - %d : iter (%x,%x) / next (%x,%x) / pred (%x,%x)\n", 369 index, iter_cxy, iter_ptr, next_cxy, next_ptr, pred_cxy, pred_ptr ); 370 } 371 } 372 } // end xlist_display() 373 309 374 #endif /* _XLIST_H_ */ -
trunk/kernel/mm/mapper.c
r606 r610 188 188 { 189 189 190 if( mapper_cxy == local_cxy ) // mapper is local 191 { 192 190 193 #if (DEBUG_MAPPER_GET_PAGE & 1) 191 194 if( DEBUG_MAPPER_GET_PAGE < cycle ) 192 printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ ); 193 #endif 194 if( mapper_cxy == local_cxy ) // mapper is local 195 { 195 printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ ); 196 #endif 196 197 error = mapper_handle_miss( mapper_ptr, 197 198 page_id, … … 200 201 else 201 202 { 203 204 #if (DEBUG_MAPPER_GET_PAGE & 1) 205 if( DEBUG_MAPPER_GET_PAGE < cycle ) 206 printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ ); 207 #endif 202 208 rpc_mapper_handle_miss_client( mapper_cxy, 203 209 mapper_ptr, … … 253 259 vfs_inode_t * inode = mapper->inode; 254 260 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 255 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 256 printk("\n[%s] enter for page %d in <%s> / cycle %d\n", 261 // if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 262 // if( (page_id == 1) && (cycle > 10000000) ) 263 printk("\n[%s] enter for page %d in <%s> / cycle %d", 257 264 __FUNCTION__, page_id, name, cycle ); 258 265 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) 259 grdxt_display( &mapper->rt, name );260 #endif 261 262 // allocate one page from the mappercluster266 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name ); 267 #endif 268 269 // allocate one page from the local cluster 263 270 req.type = KMEM_PAGE; 264 271 req.size = 0; … … 313 320 #if DEBUG_MAPPER_HANDLE_MISS 314 321 cycle = (uint32_t)hal_get_cycles(); 315 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 316 printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n", 322 // if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 323 // if( (page_id == 1) && (cycle > 10000000) ) 324 printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d", 317 325 __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle ); 318 326 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) 319 grdxt_display( 
&mapper->rt, name );327 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name ); 320 328 #endif 321 329 … … 348 356 } // end mapper_release_page() 349 357 350 //////////////////////////////////////////// 351 error_t mapper_move_user( mapper_t * mapper,358 /////////////////////////////////////////////// 359 error_t mapper_move_user( xptr_t mapper_xp, 352 360 bool_t to_buffer, 353 361 uint32_t file_offset, … … 355 363 uint32_t size ) 356 364 { 357 xptr_t mapper_xp; // extended pointer on local mapper358 365 uint32_t page_offset; // first byte to move to/from a mapper page 359 366 uint32_t page_count; // number of bytes to move to/from a mapper page … … 371 378 #endif 372 379 373 // build extended pointer on mapper374 mapper_xp = XPTR( local_cxy , mapper );375 376 380 // compute offsets of first and last bytes in file 377 381 uint32_t min_byte = file_offset; … … 384 388 #if (DEBUG_MAPPER_MOVE_USER & 1) 385 389 if( DEBUG_MAPPER_MOVE_USER < cycle ) 386 printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last ); 390 printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n", 391 __FUNCTION__, this->process->pid, this->trdid, first, last ); 387 392 #endif 388 393 … … 404 409 #if (DEBUG_MAPPER_MOVE_USER & 1) 405 410 if( DEBUG_MAPPER_MOVE_USER < cycle ) 406 printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",407 __FUNCTION__ 411 printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n", 412 __FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count ); 408 413 #endif 409 414 … … 412 417 413 418 if ( page_xp == XPTR_NULL ) return -1; 419 420 #if (DEBUG_MAPPER_MOVE_USER & 1) 421 if( DEBUG_MAPPER_MOVE_USER < cycle ) 422 printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n", 423 __FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) ); 424 #endif 414 425 415 426 // compute pointer in mapper … … 547 558 } 548 559 560 #if (DEBUG_MAPPER_MOVE_KERNEL & 1) 561 if( 
DEBUG_MAPPER_MOVE_KERNEL < cycle ) 562 printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n", 563 __FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr ); 564 #endif 565 549 566 // move fragment 550 567 hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count ); -
trunk/kernel/mm/mapper.h
r606 r610 45 45 * - The leaves are pointers on physical page descriptors, dynamically allocated 46 46 * in the local cluster. 47 * - In a given cluster, a mapper is a "private" structure: a thread accessing the mapper48 * must be running in the cluster containing it (can be a local thread or a RPC thread).49 * - The mapper is protected by a blocking "rwlock", to support several simultaneous50 * readers, and only one writer. This lock implement a busy waiting policy.51 * - The mapper_get_page() function that return a page descriptor pointer from a page52 * index in file is in charge of handling the miss on the mapper cache.47 * - The mapper is protected by a "remote_rwlock", to support several simultaneous 48 * "readers", and only one "writer". 49 * - A "reader" thread, calling the mapper_remote_get_page() function to get a page 50 * descriptor pointer from the page index in file, can be remote (running in any cluster). 51 * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss 52 * must be local (running in the mapper cluster). 53 53 * - The vfs_mapper_move_page() function access the file system to handle a mapper miss, 54 54 * or update a dirty page on device. 55 * - The vfs_mapper_load_all() functions is used to load all pages of a given file56 * or directory into the mapper.55 * - The vfs_mapper_load_all() functions is used to load all pages of a directory 56 * into the mapper (prefetch). 57 57 * - the mapper_move_user() function is used to move data to or from an user buffer. 58 58 * This user space buffer can be physically distributed in several clusters. 
… … 137 137 138 138 /******************************************************************************************* 139 * This function move data between a local mapper, and a distributed user buffer.140 * It must be called by a thread running in cluster containing the mapper.139 * This function move data between a remote mapper, dentified by the <mapper_xp> argument, 140 * and a distributed user buffer. It can be called by a thread running in any cluster. 141 141 * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls. 142 142 * If required, the data transfer is split in "fragments", where one fragment contains … … 144 144 * It uses "hal_uspace" accesses to move a fragment to/from the user buffer. 145 145 * In case of write, the dirty bit is set for all pages written in the mapper. 146 * The mapper being an extendable cache, it is automatically extended when required 147 * for both read and write accesses. 146 * The mapper being an extendable cache, it is automatically extended when required. 148 147 * The "offset" field in the file descriptor, and the "size" field in inode descriptor 149 148 * are not modified by this function. 150 149 ******************************************************************************************* 151 * @ mapper : localpointer on mapper.150 * @ mapper_xp : extended pointer on mapper. 152 151 * @ to_buffer : mapper -> buffer if true / buffer -> mapper if false. 153 152 * @ file_offset : first byte to move in file. … … 156 155 * returns O if success / returns -1 if error. 157 156 ******************************************************************************************/ 158 error_t mapper_move_user( mapper_t * mapper,157 error_t mapper_move_user( xptr_t mappe_xp, 159 158 bool_t to_buffer, 160 159 uint32_t file_offset, -
trunk/kernel/mm/ppm.c
r606 r610 413 413 xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock ); 414 414 415 // printk("\n@@@ %s : before dirty_list lock aquire\n", __FUNCTION__ ); 416 415 417 // lock the remote PPM dirty_list 416 418 remote_queuelock_acquire( dirty_lock_xp ); 417 419 420 // printk("\n@@@ %s : after dirty_list lock aquire\n", __FUNCTION__ ); 421 418 422 // lock the remote page 419 423 remote_busylock_acquire( page_lock_xp ); 424 425 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ ); 420 426 421 427 // get remote page flags … … 460 466 } 461 467 468 // printk("\n@@@ %s : before page lock release\n", __FUNCTION__ ); 469 462 470 // unlock the remote page 463 471 remote_busylock_release( page_lock_xp ); 464 472 473 // printk("\n@@@ %s : after page lock release\n", __FUNCTION__ ); 474 465 475 // unlock the remote PPM dirty_list 466 476 remote_queuelock_release( dirty_lock_xp ); 477 478 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ ); 467 479 468 480 return done; -
trunk/kernel/mm/ppm.h
r606 r610 62 62 * also rooted in the PPM, in order to be able to save all dirty pages on disk. 63 63 * This dirty list is protected by a specific remote_queuelock, because it can be 64 * modified by a remote thread, but it is implemented as a local list, because it 65 * contains only local pages. 64 * modified by a remote thread, but it contains only local pages. 66 65 ****************************************************************************************/ 67 66 … … 193 192 * It can be called by a thread running in any cluster. 194 193 * - it takes the queuelock protecting the PPM dirty_list. 194 * - it takes the busylock protecting the page flags. 195 195 * - it test the PG_DIRTY flag in the page descriptor. 196 196 * . if page already dirty => do nothing 197 197 * . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list. 198 * - it releases the busylock protcting the page flags. 198 199 * - it releases the queuelock protecting the PPM dirty_list. 199 200 ***************************************************************************************** … … 207 208 * It can be called by a thread running in any cluster. 208 209 * - it takes the queuelock protecting the PPM dirty_list. 210 * - it takes the busylock protecting the page flags. 209 211 * - it test the PG_DIRTY flag in the page descriptor. 210 212 * . if page not dirty => do nothing 211 213 * . it page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list. 214 * - it releases the busylock protcting the page flags. 212 215 * - it releases the queuelock protecting the PPM dirty_list. 213 216 ***************************************************************************************** -
trunk/kernel/mm/vmm.c
r606 r610 1444 1444 #endif 1445 1445 1446 // compute target cluster1447 1446 page_t * page_ptr; 1448 1447 cxy_t page_cxy; … … 1611 1610 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1612 1611 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1613 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" ,1612 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" 1614 1613 " %d bytes from mapper / %d bytes from BSS\n", 1615 1614 __FUNCTION__, this->process->pid, this->trdid, vpn, … … 1674 1673 (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, 1675 1674 &vseg ); 1676 1677 1675 if( error ) 1678 1676 { … … 1933 1931 #endif 1934 1932 1933 // access local GPT to get GPT_COW flag 1934 bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn ); 1935 1936 if( cow == false ) return EXCP_USER_ERROR; 1937 1935 1938 // get local vseg 1936 1939 error = vmm_get_vseg( process, 1937 1940 (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, 1938 1941 &vseg ); 1939 1940 1942 if( error ) 1941 1943 { … … 1950 1952 ref_ptr = GET_PTR( process->ref_xp ); 1951 1953 1952 // build relevant extended pointers on GPT and GPT lock1954 // build relevant extended pointers on relevant GPT and GPT lock 1953 1955 // - access local GPT for a private vseg 1954 1956 // - access reference GPT for a public vseg -
trunk/kernel/mm/vmm.h
r595 r610 158 158 bool_t mapping ); 159 159 160 /******************************************************************************************* 160 /********************************************************************************************* 161 161 * This function adds a vseg descriptor in the VSL of a given VMM, 162 162 * and updates the vmm field in the vseg descriptor. 163 163 * It takes the lock protecting VSL. 164 ******************************************************************************************* 164 ********************************************************************************************* 165 165 * @ vmm : pointer on the VMM 166 166 * @ vseg : pointer on the vseg descriptor 167 ****************************************************************************************** /167 ********************************************************************************************/ 168 168 void vmm_vseg_attach( struct vmm_s * vmm, 169 169 vseg_t * vseg ); 170 170 171 /******************************************************************************************* 171 /********************************************************************************************* 172 172 * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM, 173 173 * and updates the vmm field in the vseg descriptor. No memory is released. 174 174 * It takes the lock protecting VSL. 
175 ******************************************************************************************* 175 ********************************************************************************************* 176 176 * @ vmm : pointer on the VMM 177 177 * @ vseg : pointer on the vseg descriptor 178 ****************************************************************************************** /178 ********************************************************************************************/ 179 179 void vmm_vseg_detach( struct vmm_s * vmm, 180 180 vseg_t * vseg ); … … 326 326 * (d) if the removed region cut the vseg in three parts, it is modified, and a new 327 327 * vseg is created with same type. 328 * FIXME [AG] this function mustbe called by a thread running in the reference cluster,329 * and the VMM mustbe updated in all process descriptors copies.328 * FIXME [AG] this function should be called by a thread running in the reference cluster, 329 * and the VMM should be updated in all process descriptors copies. 330 330 ********************************************************************************************* 331 331 * @ process : pointer on process descriptor … … 357 357 /********************************************************************************************* 358 358 * This function is called by the generic exception handler in case of page-fault event, 359 * detected for a given <vpn> in a given <process> in any cluster.359 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM. 360 360 * It checks the missing VPN and returns an user error if it is not in a registered vseg. 361 361 * For a legal VPN, there is actually 3 cases: … … 370 370 * on vseg type, and updates directly (without RPC) the local GPT and the reference GPT. 371 371 * Other GPT copies will updated on demand. 
372 * In the three cases, concurrent accesses to the GPT are handled, thanks to the372 * Concurrent accesses to the GPT are handled, thanks to the 373 373 * remote_rwlock protecting each GPT copy. 374 374 ********************************************************************************************* 375 * @ process : pointer on local process descriptor copy.376 * @ vpn 375 * @ process : local pointer on local process. 376 * @ vpn : VPN of the missing PTE. 377 377 * @ returns EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC after analysis 378 378 ********************************************************************************************/ … … 381 381 382 382 /********************************************************************************************* 383 * This function is called by the generic exception handler in case of copy-on-writeevent,384 * detected for a given <vpn> in a given <process> in any cluster.383 * This function is called by the generic exception handler in case of WRITE violation event, 384 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM. 385 385 * It returns a kernel panic if VPN is not in a registered vseg or is not mapped. 386 386 * For a legal mapped vseg there is two cases: … … 399 399 * Finally it calls the vmm_global_update_pte() function to reset the COW flag and set 400 400 * the WRITE flag in all the GPT copies, using a RPC if the reference cluster is remote. 401 * In both cases, concurrent accesses to the GPT are handled, thanks to the402 * remote_rwlock protecting each GPT copy.401 * In both cases, concurrent accesses to the GPT are protected by the remote_rwlock 402 * atached to the GPT copy in VMM. 403 403 ********************************************************************************************* 404 404 * @ process : pointer on local process descriptor copy. -
trunk/kernel/syscalls/shared_include/syscalls_numbers.h
r584 r610 41 41 SYS_MUTEX = 9, 42 42 43 SYS_ EXIT= 10,43 SYS_RENAME = 10, 44 44 SYS_MUNMAP = 11, 45 45 SYS_OPEN = 12, … … 85 85 SYS_IS_FG = 49, 86 86 87 SYSCALLS_NR = 50, 87 SYS_EXIT = 50, 88 89 SYSCALLS_NR = 51, 90 88 91 } syscalls_t; 89 92 -
trunk/kernel/syscalls/sys_chdir.c
r566 r610 1 1 /* 2 * sys_chdir : change process current working directory2 * sys_chdir.c - kernel function implementing the "chdir" syscall. 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 * Copyright (c) 2011,2012UPMC Sorbonne Universites6 * Copyright (c) UPMC Sorbonne Universites 7 7 * 8 8 * This file is part of ALMOS-MKH. … … 38 38 { 39 39 error_t error; 40 vseg_t * vseg; 41 xptr_t root_inode_xp; 42 40 43 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 41 44 42 45 thread_t * this = CURRENT_THREAD; 43 46 process_t * process = this->process; 47 48 #if (DEBUG_SYS_CHDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 49 uint64_t tm_start = hal_get_cycles(); 50 #endif 44 51 45 52 // check pathname length … … 48 55 49 56 #if DEBUG_SYSCALLS_ERROR 50 printk("\n[ERROR] in %s : pathname too long / thread %x in process %x\n",51 __FUNCTION__, this->trdid, process->pid );57 printk("\n[ERROR] in %s : pathname too long / thread[%x,%x]\n", 58 __FUNCTION__, process->pid, this->trdid ); 52 59 #endif 53 this->errno = E NFILE;60 this->errno = EINVAL; 54 61 return -1; 55 62 } 63 64 // check pathname in user space 65 if( vmm_get_vseg( process, (intptr_t)pathname , &vseg ) ) 66 { 67 68 #if DEBUG_SYSCALLS_ERROR 69 printk("\n[ERROR] in %s : user buffer unmapped %x for thread[%x,%x]\n", 70 __FUNCTION__ , (intptr_t)pathname , process->pid, this->trdid ); 71 #endif 72 this->errno = EINVAL; 73 return -1; 74 } 56 75 57 76 // copy pathname in kernel space 58 77 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 59 78 60 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__ ); 61 return -1; 79 #if DEBUG_SYS_CHDIR 80 if( DEBUG_SYS_CHDIR < tm_start ) 81 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", 82 __FUNCTION__, process->pid, this->trdid, kbuf, (uint32_t)tm_start ); 83 #endif 62 84 63 // get cluster and local pointer on reference process 64 // xptr_t ref_xp = process->ref_xp; 65 // process_t * ref_ptr = (process_t *)GET_PTR( 
ref_xp ); 66 // cxy_t ref_cxy = GET_CXY( ref_xp ); 85 // compute root inode for path 86 if( kbuf[0] == '/' ) // absolute path 87 { 88 // use extended pointer on VFS root inode 89 root_inode_xp = process->vfs_root_xp; 90 } 91 else // relative path 92 { 93 // get cluster and local pointer on reference process 94 xptr_t ref_xp = process->ref_xp; 95 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 96 cxy_t ref_cxy = GET_CXY( ref_xp ); 67 97 68 // get extended pointer on cwd lock in reference process 69 // xptr_t lock_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 98 // use extended pointer on CWD inode 99 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 100 } 70 101 71 // get cwd lock in read mode 72 // remote_rwlock_rd_acquire( lock_xp ); 73 74 // TODO ce n'et pas au VFS de le faire [AG] 75 // error = vfs_chdir( process->vfs_cwd_xp , kbuf ); 76 77 // release cwd lock 78 // remote_rwlock_rd_release( lock_xp ); 102 // call the relevant VFS function 103 error = vfs_chdir( root_inode_xp , kbuf ); 79 104 80 105 if( error ) 81 106 { 82 printk("\n[ERROR] in %s : cannot change current directory\n", __FUNCTION__ ); 107 108 #if DEBUG_SYSCALLS_ERROR 109 printk("\n[ERROR] in %s / thread[%x,%x] : cannot change CWD\n", 110 __FUNCTION__ , process->pid , this->trdid ); 111 #endif 83 112 this->errno = error; 84 113 return -1; 85 114 } 86 115 116 hal_fence(); 117 118 #if (DEBUG_SYS_CHDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 119 uint64_t tm_end = hal_get_cycles(); 120 #endif 121 122 #if DEBUG_SYS_CHDIR 123 if( DEBUG_SYS_CHDIR < tm_end ) 124 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 125 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 126 #endif 127 128 #if CONFIG_INSTRUMENTATION_SYSCALLS 129 hal_atomic_add( &syscalls_cumul_cost[SYS_CHDIR] , tm_end - tm_start ); 130 hal_atomic_add( &syscalls_occurences[SYS_CHDIR] , 1 ); 131 #endif 132 87 133 return 0; 88 134 } -
trunk/kernel/syscalls/sys_getcwd.c
r566 r610 1 1 /* 2 * sys_getcwd.c - get process current work directory2 * sys_getcwd.c - kernel function implementing the "getcwd" syscall. 3 3 * 4 4 * Author Alain Greiner (2016,2017,2018) … … 35 35 #include <syscalls.h> 36 36 37 /* TODO: user page(s) need to be locked [AG] */ 38 39 //////////////////////////////// 40 int sys_getcwd ( char * buf, 37 /////////////////////////////////// 38 int sys_getcwd ( char * buffer, 41 39 uint32_t nbytes ) 42 40 { 43 error_t error; 44 vseg_t * vseg; 45 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 46 41 error_t error; 42 vseg_t * vseg; 43 char * first; // first character valid in buffer 44 45 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 46 47 47 thread_t * this = CURRENT_THREAD; 48 48 process_t * process = this->process; 49 50 #if (DEBUG_SYS_GETCWD || CONFIG_INSTRUMENTATION_SYSCALLS) 51 uint64_t tm_start = hal_get_cycles(); 52 #endif 49 53 50 54 // check buffer size … … 53 57 54 58 #if DEBUG_SYSCALLS_ERROR 55 printk("\n[ERROR] in %s : buffer too small / thread %x / process %x\n",56 __FUNCTION__ , this->trdid , process->pid );59 printk("\n[ERROR] in %s : buffer too small for thread %x,%x]\n", 60 __FUNCTION__ , process->pid, this->trdid ); 57 61 #endif 58 62 this->errno = EINVAL; … … 61 65 62 66 // check buffer in user space 63 error = vmm_get_vseg( process, (intptr_t)buf , &vseg );67 error = vmm_get_vseg( process, (intptr_t)buffer , &vseg ); 64 68 65 69 if( error ) … … 67 71 68 72 #if DEBUG_SYSCALLS_ERROR 69 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",70 __FUNCTION__ , (intptr_t)buf , this->trdid , process->pid );73 printk("\n[ERROR] in %s : user buffer unmapped %x for thread[%x,%x]\n", 74 __FUNCTION__ , (intptr_t)buffer , process->pid, this->trdid ); 71 75 #endif 72 76 this->errno = EINVAL; … … 74 78 } 75 79 76 // get reference process cluster and local pointer 80 #if DEBUG_SYS_GETCWD 81 if( DEBUG_SYS_GETCWD < tm_start ) 82 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 83 __FUNCTION__, process->pid, 
this->trdid, (uint32_t)tm_start ); 84 #endif 85 86 // get extended pointer on CWD inode from the reference process 77 87 xptr_t ref_xp = process->ref_xp; 88 process_t * ref_ptr = GET_PTR( ref_xp ); 78 89 cxy_t ref_cxy = GET_CXY( ref_xp ); 79 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 80 81 // get CWD lock in read mode 82 remote_rwlock_rd_acquire( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 90 xptr_t cwd_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 83 91 84 92 // call relevant VFS function 85 error = vfs_get_path( XPTR( ref_cxy , &ref_ptr->vfs_cwd_xp ) , 86 kbuf , CONFIG_VFS_MAX_PATH_LENGTH ); 87 88 // release CWD lock in read mode 89 remote_rwlock_rd_release( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 93 error = vfs_get_path( cwd_xp, 94 kbuf, 95 &first, 96 CONFIG_VFS_MAX_PATH_LENGTH ); 90 97 91 98 // copy kernel buffer to user space 92 hal_ copy_to_uspace( buf , kbuf, CONFIG_VFS_MAX_PATH_LENGTH );99 hal_strcpy_to_uspace( buffer , first , CONFIG_VFS_MAX_PATH_LENGTH ); 93 100 94 101 hal_fence(); 102 103 #if (DEBUG_SYS_GETCWD || CONFIG_INSTRUMENTATION_SYSCALLS) 104 uint64_t tm_end = hal_get_cycles(); 105 #endif 106 107 #if DEBUG_SYS_GETCWD 108 if( DEBUG_SYS_GETCWD < tm_end ) 109 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 110 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 111 #endif 112 113 #if CONFIG_INSTRUMENTATION_SYSCALLS 114 hal_atomic_add( &syscalls_cumul_cost[SYS_GETCWD] , tm_end - tm_start ); 115 hal_atomic_add( &syscalls_occurences[SYS_GETCWD] , 1 ); 116 #endif 95 117 96 118 return 0; -
trunk/kernel/syscalls/sys_mkdir.c
r566 r610 1 1 /* 2 * sys_mkdir.c - Create a new directory in file system.2 * sys_mkdir.c - creates a new directory in VFS 3 3 * 4 * Author Alain Greiner (2016,2017)4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 * Copyright (c) UPMC Sorbonne Universites6 * Copyright (c) UPMC Sorbonne Universites 7 7 * 8 * This file is part of ALMOS- MKH.8 * This file is part of ALMOS-kernel. 9 9 * 10 10 * ALMOS-MKH is free software; you can redistribute it and/or modify it … … 22 22 */ 23 23 24 #include <kernel_config.h> 24 25 #include <hal_kernel_types.h> 25 26 #include <hal_uspace.h> 27 #include <errno.h> 26 28 #include <vfs.h> 27 #include <vmm.h>28 #include <errno.h>29 29 #include <process.h> 30 30 #include <thread.h> 31 31 #include <printk.h> 32 32 33 /////////////////////////////////// 34 int sys_mkdir( char * pathname, 35 uint32_t mode __attribute__((unused)) ) 33 #include <syscalls.h> 34 35 //////////////////////////////////// 36 int sys_mkdir ( char * pathname, 37 uint32_t rights __attribute__((unused)) ) 36 38 { 37 error_t error; 38 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 39 error_t error; 40 xptr_t root_inode_xp; // extended pointer on root inode 41 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 39 42 40 43 thread_t * this = CURRENT_THREAD; 41 44 process_t * process = this->process; 42 45 43 // check fd_array not full 44 if( process_fd_array_full() ) 46 #if (DEBUG_SYS_MKDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 47 uint64_t tm_start = hal_get_cycles(); 48 #endif 49 50 // check pathname length 51 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 45 52 { 46 printk("\n[ERROR] in %s : file descriptor array full for process %x\n", 47 __FUNCTION__ , process->pid ); 53 54 #if DEBUG_SYSCALLS_ERROR 55 printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ ); 56 #endif 48 57 this->errno = ENFILE; 49 58 return -1; 50 59 } 51 60 52 // check pathname length 53 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 61 // copy pathname in kernel space 62 
hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 63 64 #if DEBUG_SYS_MKDIR 65 if( DEBUG_SYS_MKDIR < tm_start ) 66 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", 67 __FUNCTION__, process->pid, this->trdid, kbuf, (uint32_t)tm_start ); 68 #endif 69 70 // compute root inode for path 71 if( kbuf[0] == '/' ) // absolute path 54 72 { 55 printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ ); 73 // use extended pointer on VFS root inode 74 root_inode_xp = process->vfs_root_xp; 75 } 76 else // relative path 77 { 78 // get cluster and local pointer on reference process 79 xptr_t ref_xp = process->ref_xp; 80 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 81 cxy_t ref_cxy = GET_CXY( ref_xp ); 82 83 // use extended pointer on CWD inode 84 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 85 } 86 87 // call relevant VFS function 88 error = vfs_mkdir( root_inode_xp , kbuf , rights ); 89 90 if( error ) 91 { 92 93 #if DEBUG_SYSCALLS_ERROR 94 printk("\n[ERROR] in %s : cannot create directory <%s>\n", __FUNCTION__, kbuf ); 95 #endif 56 96 this->errno = ENFILE; 57 97 return -1; 58 98 } 59 99 60 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__ ); 61 return -1; 62 63 // copy pathname in kernel space 64 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 100 #if (DEBUG_SYS_MKDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 101 uint64_t tm_end = hal_get_cycles(); 102 #endif 65 103 66 // get cluster and local pointer on reference process 67 // xptr_t ref_xp = process->ref_xp; 68 // process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 69 // cxy_t ref_cxy = GET_CXY( ref_xp ); 70 71 // get extended pointer on cwd inode 72 // xptr_t cwd_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_cwd_xp ) ); 73 74 // get the cwd lock in read mode from reference process 75 // remote_rwlock_rd_lock( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 76 77 // call the relevant VFS function 78 // error = vfs_mkdir( cwd_xp, 
79 // kbuf, 80 // mode ); 81 82 // release the cwd lock 83 // remote_rwlock_rd_unlock( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 84 85 if( error ) 86 { 87 printk("\n[ERROR] in %s : cannot create directory %s\n", 88 __FUNCTION__ , kbuf ); 89 this->errno = error; 90 return -1; 91 } 104 #if DEBUG_SYS_MKDIR 105 if( DEBUG_SYS_MKDIR < tm_end ) 106 printk("\n[%s] thread[%x,%x] exit for <%s> / cycle %d\n", 107 __FUNCTION__, process->pid, this->trdid, kbuf, (uint32_t)tm_end ); 108 #endif 109 110 #if CONFIG_INSTRUMENTATION_SYSCALLS 111 hal_atomic_add( &syscalls_cumul_cost[SYS_MKDIR] , tm_end - tm_start ); 112 hal_atomic_add( &syscalls_occurences[SYS_MKDIR] , 1 ); 113 #endif 92 114 93 115 return 0; 94 } 116 117 } // end sys_mkdir() -
trunk/kernel/syscalls/sys_open.c
r604 r610 40 40 { 41 41 error_t error; 42 xptr_t file_xp; // extended pointer on vfs_file_t 43 uint32_t file_id; // file descriptor index 42 xptr_t file_xp; // extended pointer on vfs_file_t 43 uint32_t file_id; // file descriptor index 44 xptr_t root_inode_xp; // extended pointer on path root inode 45 44 46 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 45 47 … … 88 90 cxy_t ref_cxy = GET_CXY( ref_xp ); 89 91 90 // get the cwd lock in read mode from reference process 91 remote_rwlock_rd_acquire( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 92 // compute root inode for path 93 if( kbuf[0] == '/' ) // absolute path 94 { 95 // use extended pointer on VFS root inode 96 root_inode_xp = process->vfs_root_xp; 97 } 98 else // relative path 99 { 100 // use extended pointer on CWD inode 101 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 102 } 92 103 93 104 // call the relevant VFS function 94 error = vfs_open( process,105 error = vfs_open( root_inode_xp, 95 106 kbuf, 107 ref_xp, 96 108 flags, 97 109 mode, 98 110 &file_xp, 99 111 &file_id ); 100 101 // release the cwd lock102 remote_rwlock_rd_release( XPTR( ref_cxy , &ref_ptr->cwd_lock ) );103 112 104 113 if( error ) -
trunk/kernel/syscalls/sys_opendir.c
r473 r610 2 2 * sys_opendir.c - open a directory. 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 23 23 24 24 #include <hal_kernel_types.h> 25 #include <hal_uspace.h> 25 26 #include <thread.h> 26 27 #include <process.h> … … 35 36 DIR ** dirp ) 36 37 { 37 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__, pathname, dirp ); 38 return -1; 39 } // end sys opendir() 38 error_t error; 39 vseg_t * vseg; // for user space checking 40 xptr_t root_inode_xp; // extended pointer on path root inode 41 42 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 43 44 thread_t * this = CURRENT_THREAD; 45 process_t * process = this->process; 46 47 #if (DEBUG_SYS_OPENDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 48 uint64_t tm_start = hal_get_cycles(); 49 #endif 50 51 // check DIR buffer in user space 52 error = vmm_get_vseg( process , (intptr_t)dirp, &vseg ); 53 54 if( error ) 55 { 56 57 #if DEBUG_SYSCALLS_ERROR 58 printk("\n[ERROR] in %s / thread[%x,%x] : DIR buffer %x unmapped\n", 59 __FUNCTION__ , process->pid , this->trdid, dirp ); 60 vmm_display( process , false ); 61 #endif 62 this->errno = EINVAL; 63 return -1; 64 } 65 66 // check pathname length 67 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 68 { 69 70 #if DEBUG_SYSCALLS_ERROR 71 printk("\n[ERROR] in %s / thread[%x,%x] : pathname too long\n", 72 __FUNCTION__ , process->pid , this->trdid ); 73 #endif 74 this->errno = ENFILE; 75 return -1; 76 } 77 78 // copy pathname in kernel space 79 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 80 81 #if DEBUG_SYS_OPENDIR 82 if( DEBUG_SYS_OPENDIR < tm_start ) 83 printk("\n[%s] thread[%x,%x] enter for directory <%s> / cycle %d\n", 84 __FUNCTION__, process->pid, this->trdid, kbuf, (uint32_t)tm_start ); 85 #endif 86 87 // compute root inode for path 88 if( kbuf[0] == '/' ) // absolute path 89 { 90 // use extended pointer on VFS root inode 91 root_inode_xp = 
process->vfs_root_xp; 92 } 93 else // relative path 94 { 95 // get cluster and local pointer on reference process 96 xptr_t ref_xp = process->ref_xp; 97 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 98 cxy_t ref_cxy = GET_CXY( ref_xp ); 99 100 // use extended pointer on CWD inode 101 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 102 } 103 104 /* 105 // call the relevant VFS function ??? 106 error = vfs_opendir( root_inode_xp, 107 kbuf ); 108 if( error ) 109 { 110 111 #if DEBUG_SYSCALLS_ERROR 112 printk("\n[ERROR] in %s / thread[%x,%x] : cannot open directory <%s>\n", 113 __FUNCTION__ , process->pid , this->trdid , pathname ); 114 #endif 115 this->errno = ENFILE; 116 return -1; 117 } 118 119 // copy to user space ??? 120 */ 121 122 hal_fence(); 123 124 #if (DEBUG_SYS_OPENDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 125 uint64_t tm_end = hal_get_cycles(); 126 #endif 127 128 #if DEBUG_SYS_OPENDIR 129 if( DEBUG_SYS_OPENDIR < tm_end ) 130 printk("\n[%s] thread[%x,%x] exit for directory <%s> / cycle %d\n", 131 __FUNCTION__, process->pid, this->trdid, kbuf, (uint32_t)tm_end ); 132 #endif 133 134 #if CONFIG_INSTRUMENTATION_SYSCALLS 135 hal_atomic_add( &syscalls_cumul_cost[SYS_OPENDIR] , tm_end - tm_start ); 136 hal_atomic_add( &syscalls_occurences[SYS_OPENDIR] , 1 ); 137 #endif 138 139 return 0; 140 141 } // end sys_opendir() -
trunk/kernel/syscalls/sys_read.c
r604 r610 78 78 #if DEBUG_SYS_READ 79 79 if( DEBUG_SYS_READ < tm_start ) 80 printk("\n[ DBG] %s :thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n",80 printk("\n[%s] thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n", 81 81 __FUNCTION__, process->pid, this->trdid, vaddr, count, (uint32_t)tm_start ); 82 82 #endif … … 246 246 #if DEBUG_SYS_READ 247 247 if( DEBUG_SYS_READ < tm_end ) 248 printk("\n[ DBG] %s :thread[%x,%x] exit / cycle %d\n",248 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 249 249 __FUNCTION__ , process->pid, this->trdid, (uint32_t)tm_end ); 250 250 #endif -
trunk/kernel/syscalls/sys_stat.c
r604 r610 1 1 /* 2 * sys_stat.c - Return statistics on a file or directory.2 * sys_stat.c - kernel function implementing the "stat" syscall. 3 3 * 4 4 * Author Alain Greiner (2016,2017,2018) … … 41 41 vseg_t * vseg; // for user space checking 42 42 struct stat k_stat; // in kernel space 43 xptr_t inode_xp; // extended pointer on target inode43 xptr_t root_inode_xp; // extended pointer on path root inode 44 44 45 45 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; … … 59 59 60 60 #if DEBUG_SYSCALLS_ERROR 61 printk("\n[ERROR] in %s / thread[%x,%x] : stat structure unmapped\n",62 __FUNCTION__ , process->pid , this->trdid );61 printk("\n[ERROR] in %s / thread[%x,%x] : stat structure %x unmapped\n", 62 __FUNCTION__ , process->pid , this->trdid, u_stat ); 63 63 vmm_display( process , false ); 64 64 #endif … … 88 88 #endif 89 89 90 // get cluster and local pointer on reference process 91 xptr_t ref_xp = process->ref_xp; 92 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 93 cxy_t ref_cxy = GET_CXY( ref_xp ); 90 // compute root inode for path 91 if( kbuf[0] == '/' ) // absolute path 92 { 93 // use extended pointer on VFS root inode 94 root_inode_xp = process->vfs_root_xp; 95 } 96 else // relative path 97 { 98 // get cluster and local pointer on reference process 99 xptr_t ref_xp = process->ref_xp; 100 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 101 cxy_t ref_cxy = GET_CXY( ref_xp ); 94 102 95 // get extended pointer on cwd inode 96 xptr_t cwd_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_cwd_xp ) ); 97 98 // get the cwd lock in read mode from reference process 99 remote_rwlock_rd_acquire( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 100 101 // get extended pointer on remote file descriptor 102 error = vfs_lookup( cwd_xp, 103 kbuf, 104 0, 105 &inode_xp ); 106 107 // release the cwd lock 108 remote_rwlock_rd_release( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 109 110 if( error ) 111 { 112 113 #if DEBUG_SYSCALLS_ERROR 114 printk("\n[ERROR] in %s / thread[%x,%x] : cannot 
found file <%s>\n", 115 __FUNCTION__ , process->pid , this->trdid , pathname ); 116 #endif 117 this->errno = ENFILE; 118 return -1; 103 // use extended pointer on CWD inode 104 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 119 105 } 120 106 121 #if (DEBUG_SYS_STAT & 1) 122 if( DEBUG_SYS_STAT < tm_start ) 123 printk("\n[%s] thread[%x,%x] got inode %x in cluster %x for <%s>\n", 124 __FUNCTION__, process->pid, this->trdid, GET_PTR(inode_xp), GET_CXY(inode_xp), kbuf ); 125 #endif 126 127 // call VFS function to get stat info 128 error = vfs_stat( inode_xp, 129 &k_stat ); 107 // call the relevant VFS function 108 error = vfs_stat( root_inode_xp, 109 kbuf, 110 &k_stat ); 130 111 if( error ) 131 112 { … … 139 120 } 140 121 141 #if (DEBUG_SYS_STAT & 1)142 if( DEBUG_SYS_STAT < tm_start )143 printk("\n[%s] thread[%x,%x] set kstat : inum %d / size %d / mode %d\n",144 __FUNCTION__, process->pid, this->trdid, k_stat.st_ino, k_stat.st_size, k_stat.st_mode );145 #endif146 147 122 // copy k_stat to u_stat 148 123 hal_copy_to_uspace( u_stat , &k_stat , sizeof(struct stat) ); -
trunk/kernel/syscalls/sys_unlink.c
r604 r610 1 1 /* 2 * sys_unlink.c - file unlink a file2 * sys_unlink.c - unlink a file or directorya from VFS 3 3 * 4 4 * Author Alain Greiner (2016,2017,2018) … … 37 37 { 38 38 error_t error; 39 xptr_t root_inode_xp; // extended pointer on path root inode 40 39 41 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 40 42 … … 66 68 #endif 67 69 68 // get cluster and local pointer on reference process 69 xptr_t ref_xp = process->ref_xp; 70 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 71 cxy_t ref_cxy = GET_CXY( ref_xp ); 70 // compute root inode for path 71 if( kbuf[0] == '/' ) // absolute path 72 { 73 // use extended pointer on VFS root inode 74 root_inode_xp = process->vfs_root_xp; 75 } 76 else // relative path 77 { 78 // get cluster and local pointer on reference process 79 xptr_t ref_xp = process->ref_xp; 80 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 81 cxy_t ref_cxy = GET_CXY( ref_xp ); 72 82 73 // get the cwd lock in write mode from reference process 74 remote_rwlock_wr_acquire( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 83 // use extended pointer on CWD inode 84 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 85 } 75 86 76 // get extended pointer on cwd inode 77 xptr_t cwd_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_cwd_xp ) ); 78 79 // call relevant VFS function 80 error = vfs_unlink( cwd_xp , kbuf ); 81 82 // release the cwd lock in reference process 83 remote_rwlock_wr_release( XPTR( ref_cxy , &ref_ptr->cwd_lock ) ); 87 // call the relevant VFS function 88 error = vfs_unlink( root_inode_xp , kbuf ); 84 89 85 90 if( error ) -
trunk/kernel/syscalls/sys_write.c
r604 r610 77 77 tm_start = hal_get_cycles(); 78 78 if( DEBUG_SYS_WRITE < tm_start ) 79 printk("\n[ DBG] %s :thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n",79 printk("\n[%s] thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n", 80 80 __FUNCTION__, process->pid, this->trdid, vaddr, count, (uint32_t)tm_start ); 81 81 #endif … … 223 223 #if DEBUG_SYS_WRITE 224 224 if( DEBUG_SYS_WRITE < tm_end ) 225 printk("\n[ DBG] %s :thread[%x,%x] exit / cycle %d\n",225 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 226 226 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 227 227 #endif -
trunk/kernel/syscalls/syscalls.h
r594 r610 170 170 171 171 /****************************************************************************************** 172 * [10] This function implement the exit system call terminating a POSIX process. 173 * It can be called by any thread running in any cluster. 174 * It uses both remote accesses to access the owner process descriptor, and the 175 * RPC_PROCESS_SIGACTION to delete remote process and thread descriptors. 176 * In the present implementation, this function implements actually the _exit(): 177 * - it does not flush open output streams. 178 * - it does not close open streams. 179 ****************************************************************************************** 180 * @ status : terminaison status (not used in present implementation). 181 *****************************************************************************************/ 182 int sys_exit( uint32_t status ); 172 * [10] This function causes the file named <old> to be renamed as <new>. 173 * If new exists, it is first removed. Both old and new must be of the same type (both 174 * must be either directories or non-directories) and must reside on the same file system. 175 * It guarantees that an instance of <new> will always exist, even if the system should 176 * crash in the middle of the operation. 177 ****************************************************************************************** 178 * @ old : old file name. 179 * @ new : new file name. 180 * @ return 0 if success / return -1 if failure. 181 *****************************************************************************************/ 182 int sys_rename( char *old, 183 char *new ); 183 184 184 185 /****************************************************************************************** … … 301 302 302 303 /****************************************************************************************** 303 * [21] This function creates a new directory in file system. 
304 ****************************************************************************************** 305 * @ pathname : pathname (can be relative or absolute). 306 * @ mode : access rights (as defined in chmod). 307 * @ return 0 if success / returns -1 if failure. 308 *****************************************************************************************/ 309 int sys_mkdir( char * pathname, 310 uint32_t mode ); 304 * [21] This function implements the "mkdir" system call, creating a new directory in 305 * the file system, as defined by the <pathname> argument, with the access permission 306 * defined by the <rights> argument. All nodes but the last in the pathname must exist. 307 * It can be called by any thread running in any cluster. 308 ****************************************************************************************** 309 * @ pathname : pathname defining the new directory location in file system. 310 * @ rights : access rights (non used yet). 311 * @ return 0 if success / return -1 if failure. 312 *****************************************************************************************/ 313 int sys_mkdir( char * pathname, 314 uint32_t rights ); 311 315 312 316 /****************************************************************************************** … … 653 657 uint32_t * is_fg ); 654 658 659 /****************************************************************************************** 660 * [50] This function implements the exit system call terminating a POSIX process. 661 * It can be called by any thread running in any cluster. 662 * It uses both remote accesses to access the owner process descriptor, and the 663 * RPC_PROCESS_SIGACTION to delete remote process and thread descriptors. 664 * In the present implementation, this function implements actually the _exit(): 665 * - it does not flush open output streams. 666 * - it does not close open streams. 
667 ****************************************************************************************** 668 * @ status : terminaison status. 669 *****************************************************************************************/ 670 int sys_exit( uint32_t status ); 671 655 672 #endif // _SYSCALLS_H_
Note: See TracChangeset
for help on using the changeset viewer.