Changeset 101 for trunk/kernel/kern
Timestamp: Jun 29, 2017, 4:44:52 PM
Location:  trunk/kernel/kern
Files:     11 edited
trunk/kernel/kern/cluster.c
(diff r50 → r101)

      cluster_dmsg("\n[INFO] %s : PPM initialized in cluster %x at cycle %d\n",
 -                 __FUNCTION__ , local_cxy , hal_time_stamp() );
 +                 __FUNCTION__ , local_cxy , hal_get_cycles() );

 The same hal_time_stamp() → hal_get_cycles() substitution is applied to the other
 cluster_dmsg() messages of the cluster initialisation code (KHM, KCM, cores,
 RPC fifo, Process Manager).

      // initialise copies_lists in process manager
 -    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
 +    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
      {
          remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
trunk/kernel/kern/cluster.h
(diff r50 → r101)

  /******************************************************************************************
   * This function returns an extended pointer on the reference process descriptor
 - * from the process PID. This PID can be be different from the calling thread process.
 + * from the process PID. This PID can be be different from the calling process PID.
   * It can be called by any thread running in any cluster,
   ******************************************************************************************
trunk/kernel/kern/core.c
(diff r68 → r101)

      core->thread_idle = NULL;
      core->fpu_owner   = NULL;
 -    core->rand_last   = hal_time_stamp() & 0xFFF;
 +    core->rand_last   = hal_get_cycles() & 0xFFF;

      sched_init( core );
 …
      uint32_t value = ((core->rand_last * CONFIG_RDNG_PARAM_A) +
 -                      CONFIG_RDNG_PARAM_C) ^ (hal_time_stamp() & 0xFFF);
 +                      CONFIG_RDNG_PARAM_C) ^ (hal_get_cycles() & 0xFFF);
      core->rand_last = value;
      return value;
  }
 -
 -////////////////////////////////////////////////
 -inline uint64_t core_get_cycles( core_t * core )
 -{
 -    uint32_t elapsed;
 -    uint64_t cycles;
 -    uint32_t time_stamp = core->time_stamp;
 -    uint32_t time_now   = hal_time_stamp();
 -
 -    // compute number of elapsed cycles, taking into account 32 bits register wrap
 -    if(time_now < time_stamp) elapsed = (0xFFFFFFFF - time_stamp) + time_now;
 -    else                      elapsed = (time_now - time_stamp);
 -
 -    cycles = core->cycles + elapsed;
 -
 -    // update core time
 -    core->time_stamp = time_now;
 -    core->cycles     = cycles;
 -    hal_wbflush();
 -
 -    return cycles;
 -}
 …
                       uint32_t * tm_us )
  {
 -    // uint64_t cycles = core_get_cycles( core );
 -
 -    // TODO ces deux ligne ne compilent pas : "undefined referenc to __udivdi3"
 -
 -    // *tm_ms = (cycles / CONFIG_CYCLES_PER_MS);
 -    // *tm_us = (cycles % CONFIG_CYCLES_PER_MS) / (CONFIG_CYCLES_PER_MS / 1000000);
 -
 -    printk("\n[PANIC] in %s : not implemented yet\n", __FUNCTION__ );
 +    uint64_t cycles = hal_get_cycles();
 +
 +    *tm_s  = (cycles / CONFIG_CYCLES_PER_MS);
 +    *tm_us = (cycles % CONFIG_CYCLES_PER_MS) / (CONFIG_CYCLES_PER_MS / 1000000);
  }
 …
      uint64_t cycles     = core->cycles;
      uint32_t time_stamp = core->time_stamp;
 -    uint32_t time_now   = hal_time_stamp();
 +    uint32_t time_now   = hal_get_cycles();

      // compute number of elapsed cycles taking into account 32 bits register wrap
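The removed core_get_cycles() extended the 32 bits hardware timestamp into a 64 bits
cycle count by detecting register wrap; the changeset assumes that hal_get_cycles()
now returns such a 64 bits count directly. The minimal sketch below shows the
wrap-handling idea in isolation; read_hw_timestamp32() is a hypothetical stand-in for
the 32 bits hardware register and is not part of the kernel API.

    #include <stdint.h>

    /* hypothetical accessor for a free-running 32 bits hardware cycle register */
    extern uint32_t read_hw_timestamp32( void );

    static uint64_t sw_cycles;    /* software-extended 64 bits counter       */
    static uint32_t last_stamp;   /* last sampled value of the hw register   */

    /* return a monotonic 64 bits cycle count, assuming at most one 32 bits
     * wrap between two consecutive calls (same convention as the removed code) */
    uint64_t cycles64( void )
    {
        uint32_t now = read_hw_timestamp32();
        uint32_t elapsed;

        if( now < last_stamp ) elapsed = (0xFFFFFFFF - last_stamp) + now;   /* wrapped */
        else                   elapsed = now - last_stamp;

        sw_cycles += elapsed;
        last_stamp = now;
        return sw_cycles;
    }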
trunk/kernel/kern/core.h
(diff r68 → r101)

  /***************************************************************************************
 - * This function returns the current date (cycles) from both
 - * the hardware 32 bits cycles counter and the core descriptor cycles counter,
 - * taking into account the 32 bits hardware register overflow.
 - * The core descriptor time is updated.
 - ***************************************************************************************
 - * @ core    : pointer on core descriptor.
 - * @ returns the number of cycles.
 - **************************************************************************************/
 -inline uint64_t core_get_cycles( core_t * core );
 -
 -/***************************************************************************************
 - * This function returns the current date (seconds & micro-seconds) from both
 - * the hardware 32 bits cycles counter and the core descriptor cycles counter,
 - * taking into account the 32 bits hardware register overflow.
 - * The core descriptor time is updated.
 + * This function returns the current date (seconds & micro-seconds) from
 + * the 64 bits calling core cycles counter.
   ***************************************************************************************
   * @ core    : pointer on core descriptor.
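The updated comment states that core_get_time() now derives the date from the 64 bits
cycles counter alone. As a hedged illustration (not the kernel's actual implementation),
the sketch below shows one way to split a 64 bits cycle count into seconds and
microseconds, assuming a cycles-per-millisecond constant in the spirit of
CONFIG_CYCLES_PER_MS; the 800 MHz value is an assumption. Such 64 bits divisions rely
on compiler runtime helpers (__udivdi3) on 32 bits targets, which is what the TODO
removed from core.c referred to.

    #include <stdint.h>

    #define CYCLES_PER_MS  800000ULL   /* assumed 800 MHz clock : 800000 cycles per ms */

    /* split a 64 bits cycle count into full seconds and the microsecond remainder */
    static void cycles_to_time( uint64_t cycles, uint32_t * tm_s, uint32_t * tm_us )
    {
        uint64_t ms  = cycles / CYCLES_PER_MS;                  /* total milliseconds   */
        uint64_t rem = cycles % CYCLES_PER_MS;                  /* sub-millisecond part */

        *tm_s  = (uint32_t)( ms / 1000 );                       /* full seconds         */
        *tm_us = (uint32_t)( (ms % 1000) * 1000                 /* ms remainder in us   */
                             + (rem * 1000) / CYCLES_PER_MS );  /* plus sub-ms in us    */
    }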
trunk/kernel/kern/do_exception.c
(diff r16 → r101)

 The "enters" debug message is moved after the hal_get_mmu_excp() call (and now uses
 the excp_code variable):

 -    vmm_dmsg("\n[INFO] %s : enters for thread %x / process %x"
 -             " / bad_vaddr = %x / excep_code = %x\n",
 -             __FUNCTION__, this->trdid , process->pid , bad_vaddr , excep_code );
 -
      // get relevant values from MMU
      hal_get_mmu_excp( &mmu_ins_excp_code,
 …
      }

 +    vmm_dmsg("\n[INFO] %s : enters for thread %x / process %x"
 +             " / bad_vaddr = %x / excep_code = %x\n",
 +             __FUNCTION__, this->trdid , process->pid , bad_vaddr , excp_code );

      // a kernel thread should not rise an MMU exception
 …
 -    vmm_dmsg("\n[INFO] %s : found vseg for thread %x / vseg_base = %x / vseg_size = %x\n",
 -             __FUNCTION__ , this->trdid , vseg->begin , vseg->size );
 +    vmm_dmsg("\n[INFO] %s : found vseg for thread %x / vseg_min = %x / vseg_max = %x\n",
 +             __FUNCTION__ , this->trdid , vseg->min , vseg->max );

      // analyse exception code
trunk/kernel/kern/kernel_init.c
(diff r77 → r101)

 The include list is extended with the SoCLib TTY driver header:

 +#include <soclib_tty.h>

 The TXT0 terminal is no longer initialised through hal_drivers_txt_init(); the driver
 handlers are bound explicitly according to the implementation:

      remote_spinlock_init( XPTR( local_cxy , &txt0_chdev.wait_lock ) );

 -    // complete TXT-specific initialization
 -    hal_drivers_txt_init( &txt0_chdev );
 +    // TODO use generic device initialisation
 +    // hal_drivers_txt_init( &txt0_chdev );
 +
 +    if( impl == IMPL_TXT_TTY )
 +    {
 +        txt0_chdev.cmd = &soclib_tty_cmd;
 +        txt0_chdev.isr = &soclib_tty_isr;
 +        soclib_tty_init( &txt0_chdev );
 +    }

      // initialize the replicated chdev_dir[x][y] structures

 All the kinit_dmsg() / thread_dmsg() messages of kernel_init() (TXT0, ICU, MMC, DMA and
 external chdev creation, barriers 0 to 3, idle thread creation, scheduler activation)
 now use hal_get_cycles() instead of hal_time_stamp().

 The kernel process_zero is now initialised by the generic reference-process
 initialisation, and the condition guarding external devices initialisation changes:

      // CP0 initializes the process_zero descriptor
 -    if( core_lid == 0 ) process_zero_init( info );
 +    if( core_lid == 0 ) process_reference_init( &process_zero , 0 , XPTR_NULL );
 …
      // external devices initialisation to enforce the rule :
      // "The WTI index for the IPI routed to core[lid] is lid"
 -    if( core_lid == 0 )
 +    if( core_lid == 1 )
      {
          uint32_t wti_id;

 The VFS root inode is registered in process_zero before mounting DEVFS, and CP0 in the
 I/O cluster now creates the process_init before printing the banner:

 +    // register VFS root inode in process_zero
 +    process_zero.vfs_root_xp = root_inode_xp;
 +    process_zero.vfs_cwd_xp  = root_inode_xp;
 +
      // mount the DEVFS File system
      devfs_mount( root_inode_xp , "dev" );
  }

 -    // CP0 in I/O cluster print banner
 +    // CP0 in I/O cluster creates the process_init and print banner
      if( (core_lid == 0) && (local_cxy == info->io_cxy) )
      {
 +        process_init_create();
 +
          print_banner( (info->x_size * info->y_size) , info->cores_nr );

 A stray "and" is also removed at the end of the "each core get its private IRQ masks
 values" comment.
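The new TXT0 initialisation binds the SoCLib TTY handlers directly into the generic
chdev when the implementation is IMPL_TXT_TTY. The sketch below isolates that binding
pattern; the chdev_s layout, the handler prototypes, the IMPL_TXT_TTY value and the
txt_chdev_bind() helper are simplified assumptions for illustration, only the
soclib_tty_* names and the IMPL_TXT_TTY identifier come from the changeset.

    #include <stdint.h>

    /* simplified stand-in for the kernel chdev descriptor : it stores the          */
    /* implementation-specific command and interrupt handlers as function pointers  */
    typedef struct chdev_s
    {
        uint32_t   impl;                      /* hardware implementation identifier */
        void     (*cmd)( struct chdev_s * );  /* driver command handler             */
        void     (*isr)( struct chdev_s * );  /* driver interrupt service routine   */
    } chdev_t;

    #define IMPL_TXT_TTY  1                   /* assumed numerical value            */

    extern void soclib_tty_cmd ( chdev_t * ); /* driver entry points (names taken   */
    extern void soclib_tty_isr ( chdev_t * ); /* from the changeset, prototypes     */
    extern void soclib_tty_init( chdev_t * ); /* simplified)                        */

    /* hypothetical helper : bind the driver matching the detected implementation */
    static void txt_chdev_bind( chdev_t * chdev, uint32_t impl )
    {
        chdev->impl = impl;

        if( impl == IMPL_TXT_TTY )
        {
            chdev->cmd = &soclib_tty_cmd;     /* generic command entry point        */
            chdev->isr = &soclib_tty_isr;     /* generic interrupt entry point      */
            soclib_tty_init( chdev );         /* driver-specific initialisation     */
        }
        /* other TXT implementations would be dispatched here */
    }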
trunk/kernel/kern/process.c
(diff r23 → r101)

 The process_zero_init() function is removed: the kernel process_zero is now set up by
 process_reference_init() (see kernel_init.c above).

 process_reference_init() takes an extended pointer on the parent process instead of a
 parent PID, and handles the kernel process as a special case:

  void process_reference_init( process_t * process,
                               pid_t       pid,
 -                             pid_t       ppid )
 -{
 -    // reset reference process vmm
 -    vmm_init( process );
 +                             xptr_t      parent_xp )
 +{
 +    cxy_t       parent_cxy;
 +    process_t * parent_ptr;
 +    pid_t       parent_pid;
 +
 +    process_dmsg("\n[INFO] %s : enters for process %x in cluster %x / parent_xp = %l\n",
 +                 __FUNCTION__ , pid , parent_xp );
 +
 +    // get parent process cluster, local pointer, and pid
 +    // for all processes other than process_zero
 +    if( process == &process_zero )
 +    {
 +        assert( (pid == 0) , __FUNCTION__ , "process_zero must have PID = 0\n");
 +
 +        parent_pid = 0;       // process_zero is its own parent...
 +    }
 +    else
 +    {
 +        assert( (parent_xp != XPTR_NULL) , __FUNCTION__ , "parent_xp cannot be NULL\n");
 +
 +        parent_cxy = GET_CXY( parent_xp );
 +        parent_ptr = (process_t *)GET_PTR( parent_xp );
 +        parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
 +    }
 +
 +    // reset reference process vmm (not for kernel process)
 +    if( pid ) vmm_init( process );

 Later in the same function, the new process is registered in the parent children list
 only for user processes, the list root being taken in the parent cluster, and the ppid
 field is set from the parent descriptor:

 -    // register new process in the parent children list
 -    xptr_t entry = XPTR( local_cxy , &process->brothers_list );
 -    xptr_t root  = XPTR( local_cxy , &process->children_root );
 -    xlist_add_first( root , entry );
 +    // register new process in the parent children list (not for kernel process)
 +    if( pid )
 +    {
 +        xptr_t entry = XPTR( local_cxy , &process->brothers_list );
 +        xptr_t root  = XPTR( parent_cxy , &parent_ptr->children_root );
 +        xlist_add_first( root , entry );
 +    }
 …
      // initialize PID and PPID
      process->pid  = pid;
 -    process->ppid = ppid;
 +    process->ppid = parent_pid;

 An exit process_dmsg() is added at the end of the function, the "cd_lock" comment is
 corrected to "cwd_lock", and closing comments are added to process_register_thread()
 and process_remove_thread().

 process_make_exec() now receives the parent through exec_info->parent_xp, allocates
 the PID itself, and inherits vfs_root, vfs_cwd and the fd_array from the parent:

 -    // get pid and pathname to .elf file
 -    path = exec_info->path;
 -    pid  = exec_info->pid;
 -
 -    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ , "illegal PID\n" );
 +    // get parent and .elf pathname from exec_info
 +    path      = exec_info->path;
 +    parent_xp = exec_info->parent_xp;
 +
 +    // get parent process cluster and local pointer
 +    parent_cxy = GET_CXY( parent_xp );
 +    parent_ptr = (process_t *)GET_PTR( parent_xp );
 +    parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
 …
 +    // get a pid from the local cluster
 +    error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );
 +
 +    if( error )
 +    {
 +        printk("\n[ERROR] in %s : cannot get PID / cluster = %x / ppid = %x / path = %s\n",
 +               __FUNCTION__ , local_cxy , parent_pid , path );
 +        return ENOMEM;
 +    }
 +
      // initialize the process descriptor as the reference
 -    process_reference_init( process , pid , exec_info->ppid );
 -
 -    // restore from exec_info the extended pointer on vfs root, cwd, and bin
 -    process->vfs_root_xp = exec_info->vfs_root_xp;
 -    process->vfs_cwd_xp  = exec_info->vfs_cwd_xp;
 -    process->vfs_bin_xp  = exec_info->vfs_bin_xp;
 -
 -    // restore from exec_info the embedded fd_array
 -    process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ), exec_info->fd_array_xp );
 +    process_reference_init( process , pid , parent_xp );
 +
 +    // initializes vfs_root and vfs_cwd from parent process
 +    xptr_t vfs_root_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_root_xp ) );
 +    vfs_file_count_up( vfs_root_xp );
 +    process->vfs_root_xp = vfs_root_xp;
 +
 +    xptr_t vfs_cwd_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_cwd_xp ) );
 +    vfs_file_count_up( vfs_cwd_xp );
 +    process->vfs_cwd_xp = vfs_cwd_xp;
 +
 +    // initialize embedded fd_array from parent process
 +    process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
 +                            XPTR( parent_cxy , &parent_ptr->fd_array) );

 Before registering the main thread in the scheduler, the new process is also linked
 into the parent children list:

 +    // update children list in parent process
 +    xlist_add_last( XPTR( parent_cxy , &parent_ptr->children_root ),
 +                    XPTR( local_cxy , &process->brothers_list ) );
 +    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr) , 1 );

 process_init_create() is strongly simplified: it no longer allocates the process
 descriptor, the PID and the VMM itself (process_make_exec() now does it), it closes
 the three pseudo-files on the "bad indexes" error path, and it builds the new, smaller
 exec_info structure:

 -    // initialize the exec_info structure
 -    exec_info.pid         = process_init->pid;
 -    exec_info.ppid        = process_init->ppid;
 -    exec_info.fd_array_xp = XPTR( local_cxy , &process_init->fd_array );
 -    exec_info.vfs_root_xp = process_init->vfs_root_xp;
 -    exec_info.vfs_cwd_xp  = process_init->vfs_cwd_xp;
 -    exec_info.vfs_bin_xp  = process_init->vfs_bin_xp;
 +    // initialize the exec_info structure
 +    exec_info.parent_xp = XPTR( local_cxy , &process_zero );
 +    strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH );
 +    exec_info.args_nr = 0;
 +    exec_info.envs_nr = 0;

      // create process_init and thread_init
      error1 = process_make_exec( &exec_info );
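A recurring idiom in the reworked functions above is that the parent process is
designated by an extended pointer and accessed through the remote primitives. The
fragment below condenses that idiom; it reuses only the accessors and fields visible
in this changeset (GET_CXY, GET_PTR, XPTR, hal_remote_lw, xlist_add_last,
hal_remote_atomic_add, children_root, brothers_list, children_nr) and assumes the
usual kernel headers, so it is a sketch rather than compilable stand-alone code.

    /* sketch : link a locally created child process into its (possibly remote) parent */
    static void register_child_in_parent( xptr_t parent_xp, process_t * child )
    {
        // decode the extended pointer on the parent process descriptor
        cxy_t       parent_cxy = GET_CXY( parent_xp );
        process_t * parent_ptr = (process_t *)GET_PTR( parent_xp );

        // read the parent PID, possibly in a remote cluster
        child->ppid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );

        // insert the child in the parent children list and update the children counter
        xlist_add_last( XPTR( parent_cxy , &parent_ptr->children_root ),
                        XPTR( local_cxy  , &child->brothers_list ) );
        hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
    }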
trunk/kernel/kern/process.h
(diff r23 → r101)

 The fd_array_t comments are realigned and the array field is documented
 ("xptr on open file descriptors").

 The exec_info_t structure is reduced to the parent designation, an stdin pseudo-file
 pointer and the .elf pathname; the pid / ppid / fd_array_xp / vfs_root_xp / vfs_cwd_xp /
 vfs_bin_xp fields are removed:

  typedef struct exec_info_s
  {
 -    pid_t     pid;          /*! process identifier                               */
 -    pid_t     ppid;         /*! parent process identifier                        */
 -
 -    xptr_t    fd_array_xp;  /*! extended pointer on parent process fd_array      */
 -
 -    xptr_t    vfs_root_xp;  /*! extended pointer on file system root             */
 -    xptr_t    vfs_cwd_xp;   /*! extended pointer on current working directory    */
 -    xptr_t    vfs_bin_xp;   /*! extended pointer on process .elf file            */
 -
 +    xptr_t    parent_xp;    /*! extended pointer on parent process descriptor    */
 +
 +    xptr_t    stdin_xp;     /*! extended pointer on stdin pseudo-file            */
      char      path[CONFIG_VFS_MAX_PATH_LENGTH];   /*! .elf file path             */

 The process_zero_init() declaration and its comment are removed.

 The process_init_create() comment is rewritten: the function should be called once at
 the end of the kernel initialisation procedure, by the kernel "process_zero";
 "process_init" is the first user process, from which all other user processes are
 forked; its code is stored in a .elf file whose pathname is defined by
 CONFIG_PROCESS_INIT_PATH; it uses fork/exec syscalls to create the "shell" user
 process and various user daemon processes. Practically, it builds the exec_info
 structure, registers the stdin / stdout / stderr pseudo-file descriptors and the
 vfs_root and vfs_cwd in the parent process_zero, and calls the generic
 process_make_exec() function, which does the real job.

 In the process_reference_init() prototype and comment, the parent is now designated by
 an extended pointer on the parent reference process descriptor:

 -                             pid_t       ppid );
 +                             xptr_t      parent_xp );

 The process_make_exec() comment now states that the new process inherits from its
 parent (i) the open file descriptors and (ii) the vfs_root and vfs_cwd inodes, that
 the .elf file is accessed to size the code and data segments and to initialize the
 vsegs list in the VMM, and that the function can be called either by
 process_init_create() to build the "init" process, or directly by sys_exec() in case
 of local exec.
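The comment above explains that process_make_exec() works from an exec_info_t built by
its caller (process_init_create() or sys_exec()). The sketch below shows what such a
caller could look like, using only the fields and calls that appear in this changeset;
the exec_from_parent() wrapper itself is hypothetical and the error handling is reduced
to the return code.

    /* sketch : drive process_make_exec() the way process_init_create() does */
    static error_t exec_from_parent( xptr_t parent_xp, char * pathname )
    {
        exec_info_t exec_info;

        // designate the parent by an extended pointer (no pid / ppid fields anymore)
        exec_info.parent_xp = parent_xp;

        // pathname of the .elf file to be executed
        strcpy( exec_info.path , pathname );

        // no arguments and no environment variables in this minimal example
        exec_info.args_nr = 0;
        exec_info.envs_nr = 0;

        // create the new reference process descriptor and its main thread
        return process_make_exec( &exec_info );
    }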
trunk/kernel/kern/rpc.c
(diff r68 → r101)

      rpc_dmsg("\n[INFO] %s creates RPC thread %x on core %x in cluster %x at cycle %d\n",
 -             __FUNCTION__ , thread , core->gid , local_cxy , hal_time_stamp() );
 +             __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );

 The same hal_time_stamp() → hal_get_cycles() substitution is applied to the other
 rpc_dmsg() messages (RPC thread activation, creation in rpc_thread_func(), suicide,
 deschedule and wake up).
trunk/kernel/kern/scheduler.c
(diff r60 → r101)

               " has not released all locks at cycle %d\n",
               __FUNCTION__, current->trdid, current->process->pid,
 -             local_cxy , core->lid , hal_time_stamp() );
 +             local_cxy , core->lid , hal_get_cycles() );
      hal_core_sleep();
trunk/kernel/kern/thread.c
(diff r68 → r101)

      assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" );

 -    tm_start = hal_time_stamp();
 +    tm_start = hal_get_cycles();
 …
      thread_release( thread );

 -    tm_end = hal_time_stamp();
 +    tm_end = hal_get_cycles();
 …
      // compute elapsed time, taking into account 32 bits register wrap
      uint32_t elapsed;
 -    uint32_t time_now  = hal_time_stamp();
 +    uint32_t time_now  = hal_get_cycles();
      uint32_t time_last = this->time_last_check;
      if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;

 The idle thread "goes to sleep" and "wake up" idle_dmsg() messages are updated in the
 same way (hal_time_stamp() → hal_get_cycles()).