Changeset 367 for trunk/kernel
Timestamp: Aug 14, 2017, 11:39:03 AM (7 years ago)
Location:  trunk/kernel
Files:     11 edited
Legend: unchanged lines are shown with no prefix, added lines are prefixed with "+", removed lines with "-".
trunk/kernel/kern/core.c
(r337 → r367)

     core->time_stamp    = 0;
     core->ticks_nr      = 0;
-    core->ticks_period  = CONFIG_SCHED_TICK_PERIOD;
     core->usage         = 0;
     core->spurious_irqs = 0;
 ...
                     uint32_t * tm_us )
 {
-    uint64_t cycles = hal_get_cycles();
-
-    *tm_s  = (cycles / CONFIG_CYCLES_PER_MS);
-    *tm_us = (cycles % CONFIG_CYCLES_PER_MS) / (CONFIG_CYCLES_PER_MS / 1000000);
+    *tm_s  = (core->ticks_nr*CONFIG_SCHED_TICK_PERIOD)/1000;
+    *tm_us = (core->ticks_nr*CONFIG_SCHED_TICK_PERIOD*1000)%1000000;
 }

+/* deprecated 14/08/2017 [AG]
 //////////////////////////////////////
 void core_time_update( core_t * core )
 ...
     hal_fence();
 }
+*/

 ////////////////////////////////
 ...
     uint32_t ticks;

-    // update cycles and ticks counter
-    core_time_update( core );
-
-    // get current ticks number
-    ticks = core->ticks_nr;
+    // update ticks counter
+    ticks = core->ticks_nr++;

     // handle pending alarms TODO ??? [AG]
     // alarm_clock( &core->alarm_mgr , ticks );

-    // handle scheduler TODO improve the scheduling condition ... AG
-    if( (ticks % 10) == 0 ) sched_yield( NULL );
-
-    /*
-    // compute elapsed time, taking into account 32 bits register wrap
-    uint32_t elapsed;
-    uint32_t time_now  = hal_get_cycles();
-    uint32_t time_last = this->time_last_check;
-    if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;
-    else                       elapsed = time_now - time_last;
-
-    // update thread time
-    this->time_last_check = time_now;
-
-    // check elapsed time
-    if( elapsed < CONFIG_CORE_CHECK_EVERY ) return false;
-    else                                    return true;
-    */
+    // handle scheduler
+    if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( NULL );

     // update DQDT TODO This update should depend on the cluster identifier,
     // to avoid simultaneous updates from various clusters ... AG
-    if( ((ticks % CONFIG_DQDT_PERIOD) == 0) && (core->lid == 0) ) dqdt_global_update();
+    if( ((ticks % CONFIG_DQDT_TICKS_PER_QUANTUM) == 0) && (core->lid == 0) )
+        dqdt_global_update();
 }
 ...
 void core_reset_stats( core_t * core )
 {
-    core_time_update(core);
-
     core->ticks_nr = 0;
     core->usage    = 0;
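The new core_get_time() derives wall-clock time purely from the tick counter instead of reading the cycle counter, and core_clock() now triggers the scheduler every CONFIG_SCHED_TICKS_PER_QUANTUM ticks. Below is a minimal user-space sketch of that arithmetic, not the kernel code itself: the constant values are hypothetical, and CONFIG_SCHED_TICK_PERIOD is assumed to be expressed in milliseconds, as the /1000 and %1000000 factors suggest.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical values, for illustration only */
    #define TICK_PERIOD_MS     10    /* assumed tick period in milliseconds      */
    #define TICKS_PER_QUANTUM  10    /* assumed ticks between two sched_yield()  */

    /* mirrors the new core_get_time() : derive (seconds, microseconds) from ticks */
    static void ticks_to_time( uint32_t ticks_nr, uint32_t * tm_s, uint32_t * tm_us )
    {
        *tm_s  = (ticks_nr * TICK_PERIOD_MS) / 1000;
        *tm_us = (ticks_nr * TICK_PERIOD_MS * 1000) % 1000000;
    }

    int main( void )
    {
        uint32_t s, us;
        for( uint32_t ticks = 0 ; ticks <= 250 ; ticks++ )
        {
            ticks_to_time( ticks, &s, &us );

            /* the real core_clock() would call sched_yield() on these boundaries */
            if( (ticks % TICKS_PER_QUANTUM) == 0 )
                printf("tick %3u -> %u s %6u us (quantum boundary)\n", ticks, s, us);
        }
        return 0;
    }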
trunk/kernel/kern/core.h
(r279 → r367)

     lid_t       lid;            /*! core local index in cluster                */
     gid_t       gid;            /*! core global identifier (hardware index)    */

+    uint64_t    cycles;         /*! total number of cycles (from hard reset)   */
     uint32_t    time_stamp;     /*! previous time stamp (read from register)   */
+
     uint32_t    ticks_nr;       /*! number of elapsed ticks                    */
-    uint32_t    ticks_period;   /*! number of cycles between two ticks         */
     uint32_t    usage;          /*! cumulated busy_percent (idle / total)      */
     uint32_t    spurious_irqs;  /*! for instrumentation...                     */
trunk/kernel/kern/printk.h
(r337 → r367)

 #if CONFIG_RPC_DEBUG
-#define rpc_dmsg(...)   printk(__VA_ARGS__)
+#define rpc_dmsg(...)   if(hal_time_stamp() > CONFIG_RPC_DEBUG) printk(__VA_ARGS__)
 #else
 #define rpc_dmsg(...)
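With this change CONFIG_RPC_DEBUG is used both as an on/off switch and as a start date: RPC debug messages are only printed once hal_time_stamp() has passed that value. The sketch below reproduces the same gating pattern outside the kernel; hal_time_stamp_sim() and RPC_DEBUG_START are stand-ins invented for the example, and the macro body is wrapped in do { } while(0) here only to avoid the dangling-else hazard a bare if inside a macro can introduce.

    #include <stdio.h>
    #include <stdint.h>

    /* stand-ins for hal_time_stamp() and CONFIG_RPC_DEBUG, for illustration only */
    static uint32_t fake_cycles;
    static uint32_t hal_time_stamp_sim( void ) { return fake_cycles; }

    #define RPC_DEBUG_START  1000   /* assumed start date in cycles */

    /* threshold-gated debug macro, same idea as the new rpc_dmsg() */
    #define rpc_dmsg_sim(...)  do { if( hal_time_stamp_sim() > RPC_DEBUG_START ) \
                                        printf(__VA_ARGS__); } while(0)

    int main( void )
    {
        fake_cycles = 500;
        rpc_dmsg_sim("not printed : cycle %u is before the start date\n", fake_cycles);

        fake_cycles = 2000;
        rpc_dmsg_sim("printed : cycle %u is after the start date\n", fake_cycles);
        return 0;
    }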
trunk/kernel/kern/process.c
(r337 → r367)

     parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );

-    exec_dmsg("\n[INFO] %s : enters in cluster %x for path = %s\n",
-              __FUNCTION__ , local_cxy , path );
+    exec_dmsg("\n[INFO] %s : thread %x on core[%x,%d] enters for path = %s\n",
+              __FUNCTION__, CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid, path );

     // create new process descriptor
 ...
     process_reference_init( process , pid , parent_xp );

-    exec_dmsg("\n[INFO] %s : created process %x in cluster %x / path = %s\n",
-              __FUNCTION__, pid , local_cxy , path );
+    exec_dmsg("\n[INFO] %s : thread %x on core[%x,%d] created process %x / path = %s\n",
+              __FUNCTION__, CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid, pid, path );

     // initialize vfs_root and vfs_cwd from parent process
 ...

     exec_dmsg("\n[INFO] %s : fd_array copied from process %x to process %x\n",
               __FUNCTION__, parent_pid , pid );

     // initialize signal manager TODO ??? [AG]
 ...
     }

-    exec_dmsg("\n[INFO] %s : code and data vsegs from <%s> registered for process %x\n",
-              __FUNCTION__ , path , pid );
+    exec_dmsg("\n[INFO] %s : code and data vsegs registered for process %x / path = %s\n",
+              __FUNCTION__ , pid , path );

     // select a core in cluster
trunk/kernel/kern/thread.c
(r338 → r367)

 {
     thread_t * this = CURRENT_THREAD;
-    return ( (this->local_locks == 0) && (this->remote_locks == 0) );
-}
-
-///////////////////////////
-bool_t thread_check_sched()
+    return (this->local_locks == 0) && (this->remote_locks == 0);
+}
+
+/////////////////////////
+void thread_check_sched()
 {
     thread_t * this = CURRENT_THREAD;

-    // check locks count
-    if( (this->local_locks != 0) || (this->remote_locks != 0) )
-        return false;
-
-    if( this->flags & THREAD_FLAG_SCHED )
+    if( (this->local_locks  == 0) &&
+        (this->remote_locks == 0) &&
+        (this->flags & THREAD_FLAG_SCHED) )
+    {
+        this->flags &= ~THREAD_FLAG_SCHED;
         sched_yield( NULL );
-
-    return true;
-
-#if 0
-    // compute elapsed time, taking into account 32 bits register wrap
-    uint32_t elapsed;
-    uint32_t time_now  = hal_get_cycles();
-    uint32_t time_last = this->time_last_check;
-    if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;
-    else                       elapsed = time_now - time_last;
-
-    // update thread time
-    this->time_last_check = time_now;
-
-    // check elapsed time
-    if( elapsed < CONFIG_CORE_CHECK_EVERY ) return false;
-    else                                    return true;
-#endif
+    }
 }
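thread_check_sched() now implements the delayed descheduling mechanism directly: the pending THREAD_FLAG_SCHED request is consumed and sched_yield() is called only once the thread holds no local or remote lock. The sketch below reproduces that logic outside the kernel; thread_t is reduced to the three fields involved, and THREAD_FLAG_SCHED and sched_yield_sim() are hypothetical stand-ins, not the real kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_FLAG_SCHED  0x1        /* assumed flag encoding, illustration only */

    /* reduced stand-in for the kernel thread descriptor */
    typedef struct thread_s
    {
        uint32_t local_locks;
        uint32_t remote_locks;
        uint32_t flags;
    } thread_t;

    static void sched_yield_sim( void ) { printf("sched_yield()\n"); }

    /* same logic as the new thread_check_sched() : yield only when the thread
     * holds no lock and a descheduling request is pending */
    static void thread_check_sched_sim( thread_t * this )
    {
        if( (this->local_locks  == 0) &&
            (this->remote_locks == 0) &&
            (this->flags & THREAD_FLAG_SCHED) )
        {
            this->flags &= ~THREAD_FLAG_SCHED;   /* consume the pending request */
            sched_yield_sim();
        }
    }

    int main( void )
    {
        thread_t t = { .local_locks = 1, .remote_locks = 0, .flags = THREAD_FLAG_SCHED };

        thread_check_sched_sim( &t );   /* no yield : one local lock is still held  */

        t.local_locks = 0;              /* a lock release path would decrement this */
        thread_check_sched_sim( &t );   /* yield : all locks released and flag set  */
        return 0;
    }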
trunk/kernel/kern/thread.h
(r337 → r367)

 ***************************************************************************************
 * @ thread  : local pointer on target thread.
-* @ mask    : mask on selected signal.
+*s released all locks @ mask    : mask on selected signal.
 **************************************************************************************/
 inline void thread_set_signal( thread_t * thread,
 ...

 /***************************************************************************************
-* This function checks if the calling thread must be descheduled.
-* **************************************************************************************
-* @ returns true if no locks taken, and elapsed time.
-**************************************************************************************/
-bool_t thread_check_sched();
+* This function implements the delayed descheduling machanism : It is called by
+* all lock release functions, and calls the sched_yield() function when all locks
+* have beeen released and the THREAD_FLAG_SCHED flag is set.
+**************************************************************************************/
+void thread_check_sched();

 /***************************************************************************************
trunk/kernel/libk/elf.c
(r337 → r367)

 }

-    elf_dmsg("\n[INFO] %s : file <%s> open\n", __FUNCTION__ , pathname );
+    elf_dmsg("\n[INFO] %s : open file <%s>\n", __FUNCTION__ , pathname );

     // load header in local buffer
 ...
     }

+    elf_dmsg("\n[INFO] %s : segments array allocated for %s\n", __FUNCTION__ , pathname );
+
     // load seg descriptors array to local buffer
-    error = vfs_user_move( true,             // to_buffer
-                           file_xp,
-                           segs_base,
-                           segs_size );
+    error = vfs_kernel_move( true,           // to_buffer
+                             file_xp,
+                             XPTR( local_cxy , segs_base ),
+                             segs_size );

     if( error )
trunk/kernel/mm/mapper.c
(r334 → r367)

     // launch I/O operation to load page from file system
-    error = vfs_mapper_move_page( page , true );   // to mapper
+    error = vfs_mapper_move_page( page,
+                                  true );          // to mapper

     if( error )
 ...
     // get page base address
-    uint8_t * base = (uint8_t *)GET_PTR( XPTR( local_cxy , page ) );
+    xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
+    uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

     // compute source and destination pointers
 ...
     {
         dst_ptr = buffer_ptr + done;
-        src_ptr = base + page_offset;
+        src_ptr = base_ptr + page_offset;
     }
     else
     {
         src_ptr = buffer_ptr + done;
-        dst_ptr = base + page_offset;
+        dst_ptr = base_ptr + page_offset;

         page_do_dirty( page );
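The second hunk replaces the page descriptor address with the page base address returned by ppm_page2base() before copying between the user buffer and the mapper page. The fragment below is a reduced, self-contained sketch of that copy step only: page_t, page_copy() and the dirty flag are simplified stand-ins invented for the example, not the actual mapper API.

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* reduced stand-in for a mapper page : a data buffer and a dirty flag */
    typedef struct page_s
    {
        uint8_t data[PAGE_SIZE];
        bool    dirty;
    } page_t;

    /* same direction handling as the hunk above : on a read the page is the source,
     * on a write it is the destination and must be marked dirty */
    static void page_copy( page_t * page, uint8_t * buffer, uint32_t done,
                           uint32_t page_offset, uint32_t nbytes, bool to_buffer )
    {
        uint8_t * src_ptr;
        uint8_t * dst_ptr;

        if( to_buffer )
        {
            dst_ptr = buffer + done;
            src_ptr = page->data + page_offset;
        }
        else
        {
            src_ptr = buffer + done;
            dst_ptr = page->data + page_offset;
            page->dirty = true;              /* stands in for page_do_dirty() */
        }
        memcpy( dst_ptr, src_ptr, nbytes );
    }

    int main( void )
    {
        page_t  page = { .dirty = false };
        uint8_t buf[16] = "hello mapper";

        page_copy( &page, buf, 0, 128, sizeof(buf), false );   /* write into the page */
        printf("dirty = %d / content = %s\n", page.dirty, (char *)(page.data + 128));
        return 0;
    }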
trunk/kernel/vfs/fatfs.c
(r315 → r367)

 #if (CONFIG_FATFS_DEBUG > 1)
-    uint32_t * buf = (uint32_t *)ppm_page2vaddr( mapper_get_page ( mapper , 0 ) );
-    uint32_t   line , word;
+    xptr_t     base_xp = ppm_page2base( XPTR( local_cxy , mapper_get_page ( mapper , 0 ) ) );
+    uint32_t * buf     = (uint32_t *)GET_PTR( base_xp );
+    uint32_t   line , word;
     printk("\n*** FAT mapper content for first 256 entries ***\n");
     for( line = 0 ; line < 16 ; line++ )
 ...
     uint32_t line;
     uint32_t byte = 0;
-    printk("\n*** boot record at cycle %d ***\n", hal_get_cycles());
+    printk("\n***** FAT boot record\n" );
     for ( line = 0 ; line < 32 ; line++ )
 ...
     if( error ) return EIO;

-    fatfs_dmsg("\n[INFO] %s : exit for inode %x / page = %x / mapper = %x\n",
-               __FUNCTION__ , inode , page , mapper );
+    fatfs_dmsg("\n[INFO] %s : exit for inode %x / page_index = %d / mapper = %x\n",
+               __FUNCTION__ , inode , index , mapper );
 }
 ...
     uint32_t * buf = (uint32_t *)base;
     uint32_t   line , word;
-    printk("\n*** DIRECTORY content for first 16 entries ***\n");
+    printk("\n***** first 16 dir entries for parent inode %x\n", parent_inode );
     for( line = 0 ; line < 16 ; line++ )
trunk/kernel/vfs/fatfs.h
(r265 → r367)

 * This function moves a page from/to the mapper to/from the FATFS file system on device.
 * It must be called by a thread running in cluster containing the mapper.
-* The pointer on the mapper and the page index in file are registered
-* in the page descriptor.
+* The pointer on the mapper and the page index in file are found in the page descriptor.
 * WARNING : The inode field in the mapper must be NULL for the FAT mapper.
 * This is used to implement a specific behaviour to access the FAT zone on device.
 ...
                             bool_t to_mapper );

-
 /*****************************************************************************************
 * This function scan an existing parent directory, identified by the <parent> argument,
 * to find a directory entry identified by the <name> argument and update the remote
-* child inode , identified by the <child_xp> argument.
+* child inode descriptor, identified by the <child_xp> argument.
 * It set the "type", "size", and "extend" (FAT cluster index) fields in child inode.
 * It must be called by a thread running in the cluster containing the parent inode.
trunk/kernel/vfs/vfs.c
(r337 → r367)

     inode_type = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->type ) );

+    // get local pointer on associated mapper
+    mapper_t * mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
+
     // make a local copy of node name
     hal_remote_strcpy( XPTR( local_cxy , name ) , name_xp );

     // display inode
-    nolock_printk("%s%s <%s> inode_xp = %l / dentry_xp = %l\n",
-                  indent_str[indent], vfs_inode_type_str( inode_type ),
-                  name , inode_xp , dentry_xp );
+    nolock_printk("%s%s <%s> : inode = %l / mapper = %l / dentry = %l\n",
+                  indent_str[indent], vfs_inode_type_str( inode_type ), name,
+                  inode_xp , XPTR( inode_cxy , mapper_ptr ) , dentry_xp );

     // scan directory entries
 ...
     // get extended pointer on associated dentry
-    dentry_xp = hal_remote_lwd( XPTR( inode_cxy , &inode_ptr->parent_xp ) );
+    dentry_xp = hal_remote_lwd( XPTR( inode_cxy , &inode_ptr->parent_xp ) );

     // check if target inode is the File System root
 ...
     parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );

-    vfs_dmsg("\n[INFO] %s : enter in cluster %x for %s / child_cxy = %x / parent_xp = %l\n",
+    vfs_dmsg("\n[INFO] %s : enter in cluster %x for <%s> / child_cxy = %x / parent_xp = %l\n",
              __FUNCTION__ , local_cxy , name , child_cxy , parent_xp );

 ...
                                &dentry_xp );

-        vfs_dmsg("\n[INFO] %s : dentry created in local cluster %x\n",
-                 __FUNCTION__ , local_cxy );
+        vfs_dmsg("\n[INFO] %s : dentry <%s> created in local cluster %x\n",
+                 __FUNCTION__ , name , local_cxy );
     }
     else    // parent cluster is remote
 ...
                                  &error );

-        vfs_dmsg("\n[INFO] %s : dentry created in remote cluster %x\n",
-                 __FUNCTION__ , parent_cxy );
+        vfs_dmsg("\n[INFO] %s : dentry <%s> created in remote cluster %x\n",
+                 __FUNCTION__ , name , parent_cxy );

 ...
                                &inode_xp );

-        vfs_dmsg("\n[INFO] %s : inode created in local cluster %x\n",
-                 __FUNCTION__ , local_cxy );
+        vfs_dmsg("\n[INFO] %s : inode %l created in local cluster %x\n",
+                 __FUNCTION__ , inode_xp , local_cxy );
     }
     else    // child cluster is remote
 ...
                                 &error );

-        vfs_dmsg("\n[INFO] %s : inode created in remote cluster %x\n",
-                 __FUNCTION__ , child_cxy );
+        vfs_dmsg("\n[INFO] %s : inode %l created in remote cluster %x\n",
+                 __FUNCTION__ , inode_xp , child_cxy );

 ...
     hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );

-    vfs_dmsg("\n[INFO] %s : exit in cluster %x for %s\n",
+    vfs_dmsg("\n[INFO] %s : exit in cluster %x for <%s>\n",
              __FUNCTION__ , local_cxy , name );

 ...
     mapper_t * mapper = page->mapper;

-
     assert( (mapper != NULL) , __FUNCTION__ , "no mapper for page\n" );

-    vfs_dmsg("\n[INFO] %s : enters for page = %d in mapper = %x\n",
-             __FUNCTION__ , page->index , mapper );
+    vfs_dmsg("\n[INFO] %s : enters for page %d in mapper / inode %l\n",
+             __FUNCTION__ , page->index , XPTR( local_cxy , &mapper->inode ) );

     // get FS type
 ...
     }

-    vfs_dmsg("\n[INFO] %s : exit for page = %d in mapper = %x\n",
-             __FUNCTION__ , page->index , mapper );
+    vfs_dmsg("\n[INFO] %s : exit for page %d in mapper / inode %l\n",
+             __FUNCTION__ , page->index , XPTR( local_cxy , &mapper->inode ) );

     return error;