Changeset 406 for trunk/kernel
- Timestamp: Aug 29, 2017, 12:03:37 PM (7 years ago)
- Location: trunk/kernel
- Files: 42 edited
trunk/kernel/devices/dev_dma.c
- r296 → r406: the two dma_dmsg() debug messages (command entry and completion around chdev_register_command()) are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/devices/dev_fbf.c
- r279 → r406: the fbf_dmsg() debug message (thread / process / vaddr / paddr) is retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/devices/dev_ioc.c
- r401 → r406: the four ioc_dmsg() debug messages (command entry/completion, and the block-access entry/exit around dev_pic_enable_irq()) are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/devices/dev_mmc.c
- r279 → r406: the four mmc_dmsg() debug messages (entry and completion of the two dev_mmc_access() call paths) are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/devices/dev_nic.c
- r296 → r406: the nic_dmsg() debug messages on the NIC-RX and NIC-TX paths are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/devices/dev_pic.c
- r337 → r406: all irq_dmsg() debug messages are retagged from "[ INFO]" to "[DMSG]".
- dev_pic_bind_irq() and dev_pic_unbind_irq(): instead of printing the raw extended pointer (source_chdev_xp = %l), the message now prints the source chdev cluster and local pointer separately, using GET_CXY(src_chdev_xp) and GET_PTR(src_chdev_xp) (see the sketch below).
- dev_pic_enable_ipi(): the message now reports the calling core index (CURRENT_THREAD->core->lid) instead of a "?" placeholder.
- dev_pic_enable_timer() and dev_pic_send_ipi(): only the tag and the argument-line layout change.
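For reference, the reason the bind/unbind messages were split: an extended pointer combines a cluster identifier and a local pointer, and printing the two parts separately is easier to read in a trace than a single 64-bit hexadecimal value. The sketch below is illustrative only; the bit layout is an assumption, and the real XPTR()/GET_CXY()/GET_PTR() macros are defined by the ALMOS-MKH HAL, not by this changeset.

    /* Illustrative sketch only: assumes an xptr_t packs a 32-bit cluster
     * identifier in the upper half and a 32-bit local pointer in the lower
     * half. The actual encoding lives in the HAL, outside this changeset. */
    typedef unsigned long long xptr_t;

    #define XPTR( cxy , ptr )  ( ((xptr_t)(cxy) << 32) | (unsigned int)(unsigned long)(ptr) )
    #define GET_CXY( xp )      ( (unsigned int)((xp) >> 32) )
    #define GET_PTR( xp )      ( (void *)(unsigned long)((unsigned int)(xp)) )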
trunk/kernel/devices/dev_pic.h
- r279 → r406: the dev_pic_enable_timer() comment is corrected: the <period> argument is now documented as a number of milliseconds between two successive IRQs, converted to a number of cycles by the PIC driver implementation (it was previously documented, with a "twoo" typo, as a number of cycles). The @period parameter description is updated accordingly. A conversion sketch follows.
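Since the generic API now takes milliseconds, each PIC driver implementation has to translate the period into cycles for its timer hardware. A minimal sketch of such a conversion, assuming a hypothetical CYCLES_PER_MS constant derived from the platform clock (neither the constant nor the helper name comes from the ALMOS-MKH sources):

    /* Hypothetical driver-side helper: converts the generic <period> argument
     * (milliseconds) into a cycle count for the timer hardware.
     * CYCLES_PER_MS is an assumed stand-in for the platform clock frequency. */
    #define CYCLES_PER_MS  25000u   /* assumed 25 MHz core clock */

    static inline unsigned int pic_period_to_cycles( unsigned int period_ms )
    {
        return period_ms * CYCLES_PER_MS;   /* e.g. 10 ms -> 250000 cycles */
    }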
trunk/kernel/devices/dev_txt.c
- r296 → r406: the two txt_dmsg() debug messages (command entry and completion) are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/kern/cluster.c
- r374 → r406: the cluster_dmsg() progress messages of cluster initialisation (PPM, KHM, KCM, cores, RPC FIFO and Process Manager) are retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/kern/core.c
- r380 → r406: the TODO comment attached to the periodic DQDT update ("This update should depend on the cluster identifier, to avoid simultaneous updates from various clusters") is removed; the code itself (dqdt_global_update() called every CONFIG_DQDT_TICKS_PER_QUANTUM ticks on core 0) is unchanged.
trunk/kernel/kern/do_syscall.c
- r375 → r406: the syscall_dmsg() trace message (pid / trdid / service number / arguments) is retagged from "[ INFO]" to "[DMSG]".
trunk/kernel/kern/dqdt.c
- r374 → r406: in dqdt_global_print(), the recursive call on each non-NULL child is moved to its own line (formatting only).
- The explicit panic("illegal mesh size for DQDT support") when either mesh dimension exceeds 32 is replaced by assert( (x_size <= 32) && (y_size <= 32) , __FUNCTION__ , "illegal mesh size\n" ).
trunk/kernel/kern/kernel_init.c
- r401 → r406: the banner line is reworded to "Version 0.0 / %d cluster(s) / %d core(s) per cluster".
- The chdev-creation debug messages (MMC, DMA, generic chdev, PIC), still guarded by CONFIG_KINIT_DEBUG > 1, are retagged from "[ INFO]" to "[DMSG]".
- The per-barrier progress messages (barriers 0 to 8: TXT0, clusters, PIC, all chdevs, VFS root in cluster 0, VFS root in cluster IO, devFS root in cluster IO, devFS root in cluster 0, process init) are promoted from kinit_dmsg("[INFO] ...") to unconditional printk("[KINIT] ...") on core [0,0].
- chdev_dir_display() and sched_display() are now guarded by #if CONFIG_KINIT_DEBUG instead of #if ( CONFIG_KINIT_DEBUG > 1 ).
- The final vfs_display() call and the "memory footprint for main kernel objects" report are wrapped in #if CONFIG_KINIT_DEBUG ... #endif, and the report is issued with printk() instead of kinit_dmsg().
trunk/kernel/kern/printk.c
- r372 → r406: in the %l and %L conversion handlers (64-bit hexadecimal), the value is now extracted with va_arg( *args , unsigned long long ) instead of va_arg( *args , uint64_t ). A sketch of the resulting extraction follows.
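The point of the change is that va_arg() must name the promoted type of the argument actually passed; spelling it as the builtin unsigned long long keeps the %l/%L handlers independent of how uint64_t happens to be typedef'd on a given target. A minimal sketch of the extraction and 16-digit formatting, with stand-in names (the kernel's own output path goes through txt_write()):

    #include <stdarg.h>

    /* Sketch only: extracts a 64-bit value as unsigned long long and formats
     * it as "0x" followed by 16 hexadecimal digits, most significant nibble
     * first. Buffer handling and names are stand-ins for the kernel's
     * txt_write() path. */
    static void put_hex64( char out[19] , va_list * args )
    {
        static const char hex[] = "0123456789ABCDEF";
        unsigned long long val = va_arg( *args , unsigned long long );
        int i;

        out[0] = '0';
        out[1] = 'x';
        for( i = 0 ; i < 16 ; i++ )
            out[2 + i] = hex[ (val >> (60 - 4*i)) & 0xF ];
        out[18] = '\0';
    }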
trunk/kernel/kern/printk.h
- r389 → r406: the subsystem debug macros shown in the diff (cluster_dmsg, context_dmsg, core_dmsg, devfs_dmsg, dma_dmsg, dqdt_dmsg, elf_dmsg, exec_dmsg, fatfs_dmsg, fbf_dmsg, fork_dmsg, idle_dmsg, irq_dmsg, kcm_dmsg, khm_dmsg, kinit_dmsg, kmem_dmsg, mapper_dmsg, mmc_dmsg, nic_dmsg, pic_dmsg, ppm_dmsg, process_dmsg, sched_dmsg, signal_dmsg, syscall_dmsg, thread_dmsg, txt_dmsg, vmm_dmsg) are changed from an unconditional printk(__VA_ARGS__) to if(hal_time_stamp() > CONFIG_XXX_DEBUG) printk(__VA_ARGS__), so each CONFIG_XXX_DEBUG parameter now also sets the cycle after which the messages start being printed (see the sketch below).
- The duplicated dma_dmsg definition that was guarded by CONFIG_DQDT_DEBUG is removed; dqdt_dmsg keeps its own definition.
- Two new macros are added: excp_dmsg (CONFIG_EXCP_DEBUG) and gpt_dmsg (CONFIG_GPT_DEBUG).
- The rpcg_dmsg macro (CONFIG_RPCG_DEBUG) is removed; rpc.c now uses rpc_dmsg only.
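All of these macros now follow a single pattern: the CONFIG_XXX_DEBUG parameter both enables the stream (non-zero) and sets the cycle after which messages start to appear, which makes it possible to silence the noisy early boot phase. A minimal sketch of the pattern with a placeholder FOO subsystem (not a real ALMOS-MKH configuration flag):

    /* Placeholder FOO subsystem, illustrating the common pattern used by the
     * *_dmsg macros above: expands to nothing when CONFIG_FOO_DEBUG is 0,
     * otherwise prints only once hal_time_stamp() has passed the threshold. */
    #if CONFIG_FOO_DEBUG
    #define foo_dmsg(...) if( hal_time_stamp() > CONFIG_FOO_DEBUG ) printk(__VA_ARGS__)
    #else
    #define foo_dmsg(...)
    #endif

Setting the parameter to 1 traces from the very first cycles, while setting it to a large cycle count restricts the trace to the end of a long run.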
trunk/kernel/kern/process.c
- r380 → r406: the process_dmsg() and exec_dmsg() messages are retagged from "[ INFO]" to "[DMSG]"; the entry message of the reference-process initialisation now reports core[%x,%d] (cluster and core index) instead of only the cluster.
- The comment "reset reference process vmm" becomes "initialize reference process vmm".
- In the exec path, after the main thread is created, a new block guarded by CONFIG_EXEC_DEBUG dumps the new process vseg radix tree with grdxt_print( &process->vmm.grdxt , GRDXT_TYPE_VSEG , process->pid ) and its page table with hal_gpt_print( &process->vmm.gpt , process->pid ), once hal_time_stamp() has passed the CONFIG_EXEC_DEBUG threshold.
trunk/kernel/kern/rpc.c
r389 r406 97 97 page_t ** page ) // out 98 98 { 99 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",99 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 100 100 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 101 101 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 117 117 *page = (page_t *)(intptr_t)rpc.args[1]; 118 118 119 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",119 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 120 120 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 121 121 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 125 125 void rpc_pmem_get_pages_server( xptr_t xp ) 126 126 { 127 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",127 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 128 128 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 129 129 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 142 142 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 143 143 144 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",144 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 145 145 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 146 146 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 157 157 pid_t * pid ) // out 158 158 { 159 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",159 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 160 160 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 161 161 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 178 178 *error = (error_t)rpc.args[2]; 179 179 180 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",180 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 181 181 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 182 182 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 190 190 pid_t pid; // output : process identifier 191 191 192 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",192 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 193 193 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 194 194 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 209 209 hal_remote_sw( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)pid ); 210 210 211 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",211 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 212 212 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 213 213 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 224 224 error_t * error ) // out 225 225 { 226 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",226 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 227 227 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 228 228 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 244 244 *error = (error_t)rpc.args[1]; 245 245 246 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",246 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 247 247 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 248 248 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 256 256 error_t error; // local error error status 257 257 258 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",258 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 259 259 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 260 260 
CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 278 278 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 279 279 280 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",280 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 281 281 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 282 282 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 291 291 void rpc_process_kill_client( process_t * process ) 292 292 { 293 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",293 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 294 294 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 295 295 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 325 325 } 326 326 327 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",327 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 328 328 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 329 329 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 336 336 process_t * process; 337 337 338 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",338 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 339 339 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 340 340 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 360 360 } 361 361 362 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",362 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 363 363 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 364 364 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 379 379 error_t * error ) // out 380 380 { 381 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",381 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 382 382 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 383 383 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 403 403 *error = (error_t)rpc.args[5]; 404 404 405 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",405 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 406 406 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 407 407 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 421 421 error_t error; 422 422 423 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",423 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 424 424 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 425 425 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 456 456 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp ); 457 457 458 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",458 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 459 459 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 460 460 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 473 473 error_t * error ) // out 474 474 { 475 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",475 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 476 476 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 477 477 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 496 496 *error = (error_t)rpc.args[4]; 497 497 498 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",498 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 499 499 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 500 500 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 509 509 
error_t error; 510 510 511 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",511 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 512 512 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 513 513 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 533 533 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp ); 534 534 535 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",535 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 536 536 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 537 537 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 547 547 uint32_t sig_id ) // in 548 548 { 549 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",549 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 550 550 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 551 551 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 565 565 rpc_send_sync( cxy , &rpc ); 566 566 567 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",567 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 568 568 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 569 569 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 576 576 uint32_t sig_id; // signal index 577 577 578 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",578 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 579 579 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 580 580 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 591 591 signal_rise( process , sig_id ); 592 592 593 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",593 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 594 594 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 595 595 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 613 613 error_t * error ) // out 614 614 { 615 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",615 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 616 616 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 617 617 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 641 641 *error = (error_t)rpc.args[9]; 642 642 643 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",643 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 644 644 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 645 645 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 660 660 error_t error; 661 661 662 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",662 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 663 663 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 664 664 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 693 693 hal_remote_swd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error ); 694 694 695 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",695 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 696 696 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 697 697 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 706 706 struct vfs_inode_s * inode ) 707 707 { 708 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",708 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 709 709 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 710 710 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 723 723 rpc_send_sync( cxy , &rpc ); 
724 724 725 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",725 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 726 726 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 727 727 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 733 733 vfs_inode_t * inode; 734 734 735 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",735 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 736 736 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 737 737 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 747 747 vfs_inode_destroy( inode ); 748 748 749 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",749 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 750 750 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 751 751 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 764 764 error_t * error ) // out 765 765 { 766 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",766 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 767 767 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 768 768 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 787 787 *error = (error_t)rpc.args[4]; 788 788 789 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",789 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 790 790 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 791 791 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 803 803 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 804 804 805 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",805 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 806 806 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 807 807 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 829 829 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 830 830 831 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",831 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 832 832 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 833 833 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 843 843 vfs_dentry_t * dentry ) 844 844 { 845 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",845 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 846 846 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 847 847 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 860 860 rpc_send_sync( cxy , &rpc ); 861 861 862 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",862 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 863 863 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 864 864 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 870 870 vfs_dentry_t * dentry; 871 871 872 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",872 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 873 873 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 874 874 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 884 884 vfs_dentry_destroy( dentry ); 885 885 886 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",886 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 887 887 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 888 888 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 901 901 error_t * error ) // out 902 902 { 903 rpc_dmsg("\n[ INFO] %s : enter / thread %x on 
core[%x,%d] / cycle %d\n",903 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 904 904 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 905 905 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 923 923 *error = (error_t)rpc.args[3]; 924 924 925 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",925 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 926 926 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 927 927 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 936 936 error_t error; 937 937 938 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",938 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 939 939 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 940 940 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 957 957 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 958 958 959 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",959 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 960 960 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 961 961 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 970 970 vfs_file_t * file ) 971 971 { 972 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",972 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 973 973 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 974 974 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 987 987 rpc_send_sync( cxy , &rpc ); 988 988 989 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",989 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 990 990 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 991 991 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 997 997 vfs_file_t * file; 998 998 999 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",999 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1000 1000 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1001 1001 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1011 1011 vfs_file_destroy( file ); 1012 1012 1013 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1013 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1014 1014 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1015 1015 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1027 1027 error_t * error ) // out 1028 1028 { 1029 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1029 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1030 1030 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1031 1031 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1049 1049 *error = (error_t)rpc.args[3]; 1050 1050 1051 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1051 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1052 1052 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1053 1053 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1064 1064 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1065 1065 1066 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1066 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1067 1067 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1068 1068 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1087 1087 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1088 1088 1089 rpc_dmsg("\n[ INFO] %s : 
exit / thread %x on core[%x,%d] / cycle %d\n",1089 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1090 1090 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1091 1091 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1101 1101 error_t * error ) // out 1102 1102 { 1103 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1103 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1104 1104 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1105 1105 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1121 1121 *error = (error_t)rpc.args[1]; 1122 1122 1123 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1123 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1124 1124 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1125 1125 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1132 1132 vfs_inode_t * inode; 1133 1133 1134 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1134 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1135 1135 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1136 1136 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1149 1149 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 1150 1150 1151 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1151 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1152 1152 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1153 1153 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1166 1166 error_t * error ) // out 1167 1167 { 1168 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1168 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1169 1169 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1170 1170 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1189 1189 *error = (error_t)rpc.args[4]; 1190 1190 1191 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1191 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1192 1192 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1193 1193 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1203 1203 error_t error; 1204 1204 1205 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1205 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1206 1206 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1207 1207 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1223 1223 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1224 1224 1225 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1225 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1226 1226 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1227 1227 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1239 1239 error_t * error ) // out 1240 1240 { 1241 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1241 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1242 1242 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1243 1243 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1261 1261 *error = (error_t)rpc.args[3]; 1262 1262 1263 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1263 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1264 1264 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1265 1265 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1275 
1275 error_t error; 1276 1276 1277 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1277 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1278 1278 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1279 1279 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1295 1295 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1296 1296 1297 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1297 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1298 1298 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1299 1299 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1313 1313 error_t * error ) // out 1314 1314 { 1315 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1315 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1316 1316 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1317 1317 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1336 1336 *error = (error_t)rpc.args[4]; 1337 1337 1338 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1338 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1339 1339 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1340 1340 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1350 1350 error_t error; 1351 1351 1352 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1352 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1353 1353 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1354 1354 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1370 1370 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1371 1371 1372 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1372 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1373 1373 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1374 1374 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1384 1384 xptr_t * buf_xp ) // out 1385 1385 { 1386 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1386 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1387 1387 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1388 1388 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1404 1404 *buf_xp = (xptr_t)rpc.args[1]; 1405 1405 1406 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1406 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1407 1407 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1408 1408 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1412 1412 void rpc_kcm_alloc_server( xptr_t xp ) 1413 1413 { 1414 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1414 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1415 1415 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1416 1416 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1433 1433 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 1434 1434 1435 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1435 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1436 1436 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1437 1437 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1447 1447 uint32_t kmem_type ) // in 1448 1448 { 1449 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1449 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 
1450 1450 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1451 1451 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1465 1465 rpc_send_sync( cxy , &rpc ); 1466 1466 1467 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1467 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1468 1468 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1469 1469 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1473 1473 void rpc_kcm_free_server( xptr_t xp ) 1474 1474 { 1475 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1475 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1476 1476 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1477 1477 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1491 1491 kmem_free( &req ); 1492 1492 1493 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1493 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1494 1494 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1495 1495 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1510 1510 error_t * error ) // out 1511 1511 { 1512 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1512 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1513 1513 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1514 1514 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1535 1535 *error = (error_t)rpc.args[6]; 1536 1536 1537 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1537 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1538 1538 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1539 1539 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1552 1552 error_t error; 1553 1553 1554 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1554 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1555 1555 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1556 1556 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1592 1592 hal_remote_swd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 1593 1593 1594 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1594 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1595 1595 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1596 1596 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1607 1607 page_t ** page ) // out 1608 1608 { 1609 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1609 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1610 1610 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1611 1611 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1628 1628 *page = (page_t *)(intptr_t)rpc.args[2]; 1629 1629 1630 rpc_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1630 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1631 1631 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1632 1632 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1636 1636 void rpc_mapper_get_page_server( xptr_t xp ) 1637 1637 { 1638 rpc_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1638 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1639 1639 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1640 1640 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1654 1654 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 1655 1655 1656 rpc_dmsg("\n[ INFO] %s : exit / 
thread %x on core[%x,%d] / cycle %d\n",1656 rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1657 1657 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1658 1658 CURRENT_THREAD->core->lid , hal_time_stamp() ); … … 1673 1673 reg_t sr_save; 1674 1674 1675 rpc g_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x / cycle %d\n",1675 rpc_dmsg("\n[DMSG] %s : enter / client_cxy = %x / server_cxy = %x / cycle %d\n", 1676 1676 __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() ); 1677 1677 … … 1700 1700 while( error ); 1701 1701 1702 rpc g_dmsg("\n[INFO] %s : RPC %l registered / server_cxy = %x / cycle %d\n",1702 rpc_dmsg("\n[DMSG] %s : RPC %l registered / server_cxy = %x / cycle %d\n", 1703 1703 __FUNCTION__ , desc_xp , server_cxy , hal_time_stamp() ); 1704 1704 … … 1714 1714 dev_pic_send_ipi( server_cxy , 0 ); 1715 1715 1716 rpc g_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",1716 rpc_dmsg("\n[DMSG] %s : IPI sent / client_cxy = %x / server_cxy = %x\n", 1717 1717 __FUNCTION__, local_cxy , server_cxy ); 1718 1718 } … … 1729 1729 hal_restore_irq( sr_save ); 1730 1730 1731 rpc g_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x / cycle %d\n",1731 rpc_dmsg("\n[DMSG] %s : completed / client_cxy = %x / server_cxy = %x / cycle %d\n", 1732 1732 __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() ); 1733 1733 … … 1764 1764 count = 0; 1765 1765 1766 rpc g_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1766 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1767 1767 __FUNCTION__, this->trdid, local_cxy, core->lid , hal_time_stamp() ); 1768 1768 … … 1781 1781 index = hal_remote_lw( XPTR( client_cxy , &desc->index ) ); 1782 1782 1783 rpc g_dmsg("\n[INFO] %s : thread %x on core [%x,%d] / rpc = %d\n",1783 rpc_dmsg("\n[DMSG] %s : thread %x on core [%x,%d] / rpc = %d\n", 1784 1784 __FUNCTION__ , this->trdid , core->lid , local_cxy , index ); 1785 1785 … … 1850 1850 thread->blocked = 0; 1851 1851 1852 rpc g_dmsg("\n[INFO] %s : activate RPC thread %x on core [%x,%d] / cycle %d\n",1852 rpc_dmsg("\n[DMSG] %s : activate RPC thread %x on core [%x,%d] / cycle %d\n", 1853 1853 __FUNCTION__ , thread , core->gid , local_cxy , hal_time_stamp() ); 1854 1854 } … … 1875 1875 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 1876 1876 1877 rpc g_dmsg("\n[INFO] %s : create RPC thread %x on core [%x,%d] / cycle %d\n",1877 rpc_dmsg("\n[DMSG] %s : create RPC thread %x on core [%x,%d] / cycle %d\n", 1878 1878 __FUNCTION__ , thread->trdid, local_cxy, core->lid, hal_time_stamp() ); 1879 1879 } … … 1900 1900 error_t error; 1901 1901 1902 rpc g_dmsg("\n[INFO] %s : enter / thread %x / cluster %x / cycle %d\n",1902 rpc_dmsg("\n[DMSG] %s : enter / thread %x / cluster %x / cycle %d\n", 1903 1903 __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() ); 1904 1904 … … 1906 1906 if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) ) 1907 1907 { 1908 rpc g_dmsg("\n[INFO] %s : exit do nothing / thread %x / cluster %x / cycle %d\n",1908 rpc_dmsg("\n[DMSG] %s : exit do nothing / thread %x / cluster %x / cycle %d\n", 1909 1909 __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() ); 1910 1910 … … 1926 1926 } 1927 1927 1928 rpc g_dmsg("\n[INFO] %s : exit after RPC thread activation / "1928 rpc_dmsg("\n[DMSG] %s : exit after RPC thread activation / " 1929 1929 "thread %x / cluster %x / cycle %d\n", 1930 1930 __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() ); … … 1934 1934 else // light lock 
taken by another thread 1935 1935 { 1936 rpc g_dmsg("\n[INFO] %s : exit do nothing / thread %x / cluster %x / cycle %d\n",1936 rpc_dmsg("\n[DMSG] %s : exit do nothing / thread %x / cluster %x / cycle %d\n", 1937 1937 __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() ); 1938 1938 … … 1951 1951 rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 1952 1952 1953 rpc g_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1953 rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1954 1954 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() ); 1955 1955 … … 1975 1975 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 1976 1976 { 1977 rpc g_dmsg("\n[INFO] %s : RPC thread %x on core[%x,%d] suicide / cycle %d\n",1977 rpc_dmsg("\n[DMSG] %s : RPC thread %x on core[%x,%d] suicide / cycle %d\n", 1978 1978 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() ); 1979 1979 … … 1986 1986 else 1987 1987 { 1988 rpc g_dmsg("\n[INFO] %s : RPC thread %x on core[%x,%d] blocks / cycle %d\n",1988 rpc_dmsg("\n[DMSG] %s : RPC thread %x on core[%x,%d] blocks / cycle %d\n", 1989 1989 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() ); 1990 1990 … … 1992 1992 sched_yield( NULL ); 1993 1993 1994 rpc g_dmsg("\n[INFO] %s : RPC thread %x wake up on core[%x,%d] / cycle %d\n",1994 rpc_dmsg("\n[DMSG] %s : RPC thread %x wake up on core[%x,%d] / cycle %d\n", 1995 1995 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() ); 1996 1996 } -
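The rpc.c changes above trace a blocking RPC protocol: the client registers an extended pointer on its RPC descriptor in the server-cluster FIFO (retrying while the FIFO is full), sends an IPI, then waits until the server clears a response flag. The following is only a hedged sketch of that client side: the rpc_sketch_t struct and remote_fifo_put() are placeholder names, while XPTR, local_cxy, dev_pic_send_ipi() and sched_yield() appear in this changeset.

    // placeholder descriptor, field names are assumptions
    typedef struct rpc_sketch_s
    {
        uint32_t          index;       // requested service
        volatile uint32_t response;    // cleared by the server when the RPC is done
        uint64_t          args[10];    // input / output arguments
    } rpc_sketch_t;

    static void rpc_client_sketch( cxy_t server_cxy, rpc_sketch_t * desc )
    {
        desc->response = 1;                                   // not yet served

        while( remote_fifo_put( server_cxy, XPTR( local_cxy, desc ) ) )
            ;                                                 // FIFO full => retry

        dev_pic_send_ipi( server_cxy, 0 );                    // signal the server cluster

        while( desc->response ) sched_yield( NULL );          // wait for completion
    }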
trunk/kernel/kern/scheduler.c
r374 r406 154 154 scheduler_t * sched = &core->scheduler; 155 155 156 sched_dmsg("\n[ INFO] %s : enter core[%x,%d] / cycle %d\n",156 sched_dmsg("\n[DMSG] %s : enter core[%x,%d] / cycle %d\n", 157 157 __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() ); 158 158 … … 185 185 spinlock_unlock( &sched->lock ); 186 186 187 sched_dmsg("\n[ INFO] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",187 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n", 188 188 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() ); 189 189 … … 216 216 spinlock_unlock( &sched->lock ); 217 217 218 sched_dmsg("\n[ INFO] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",218 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n", 219 219 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() ); 220 220 return thread; … … 227 227 spinlock_unlock( &sched->lock ); 228 228 229 sched_dmsg("\n[ INFO] %s : exit core[%x,%d] / idle = %x / cycle %d\n",229 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / idle = %x / cycle %d\n", 230 230 __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() ); 231 231 … … 242 242 scheduler_t * sched = &core->scheduler; 243 243 244 sched_dmsg("\n[ INFO] %s : enter / thread %x on core[%x,%d]\n",244 sched_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d]\n", 245 245 __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid ); 246 246 … … 265 265 spinlock_unlock( &sched->lock ); 266 266 267 sched_dmsg("\n[ INFO] %s : exit / thread %x on core[%x,%d]\n",267 sched_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d]\n", 268 268 __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid ); 269 269 … … 279 279 scheduler_t * sched = &core->scheduler; 280 280 281 sched_dmsg("\n[ INFO] %s : thread %x on core[%x,%d] enter / cycle %d\n",281 sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] enter / cycle %d\n", 282 282 __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() ); 283 283 … … 304 304 if( next != current ) 305 305 { 306 sched_dmsg("\n[ INFO] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",306 sched_dmsg("\n[DMSG] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n", 307 307 __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid, 308 308 next->trdid, thread_type_str(next->type), hal_time_stamp() ); … … 331 331 else 332 332 { 333 sched_dmsg("\n[ INFO] %s : thread %x on core[%x,%d] continue / cycle %d\n",333 sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] continue / cycle %d\n", 334 334 __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() ); 335 335 } -
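The scheduler traces modified above document the selection order: a runnable kernel thread is preferred, then a runnable user thread, then the idle thread. The sketch below only illustrates that priority order; first_runnable(), k_root and u_root are hypothetical helper and field names, while sched->idle appears in this changeset.

    static thread_t * select_sketch( scheduler_t * sched )
    {
        thread_t * t;

        if( (t = first_runnable( &sched->k_root )) != NULL ) return t;  // kernel threads first
        if( (t = first_runnable( &sched->u_root )) != NULL ) return t;  // then user threads
        return sched->idle;                                             // nothing runnable
    }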
trunk/kernel/kern/signal.c
r23 r406 44 44 hal_atomic_or( &thread->signals , (1 << sig_id) ); 45 45 46 signal_dmsg("\n[ INFO] %s : thread %x in process %x received signal %d\n",46 signal_dmsg("\n[DMSG] %s : thread %x in process %x received signal %d\n", 47 47 __FUNCTION__, thread->trdid , process->pid , sig_id ); 48 48 } … … 59 59 thread_s * this = CURRENT_THREAD; 60 60 61 printk("\n[ INFO] %s : threadReceived signal %d, pid %d, tid %x, core %d [ KILLED ]\n",61 printk("\n[DMSG] %s : threadReceived signal %d, pid %d, tid %x, core %d [ KILLED ]\n", 62 62 sig, 63 63 this->process->pid, -
trunk/kernel/kern/thread.c
r374 r406 221 221 vseg_t * vseg; // stack vseg 222 222 223 thread_dmsg("\n[ INFO] %s : enters for process %x\n", __FUNCTION__ , pid );223 thread_dmsg("\n[DMSG] %s : enters for process %x\n", __FUNCTION__ , pid ); 224 224 225 225 // get process descriptor local copy … … 311 311 } 312 312 313 thread_dmsg("\n[ INFO] %s : exit / trdid = %x / process %x / core = %d\n",313 thread_dmsg("\n[DMSG] %s : exit / trdid = %x / process %x / core = %d\n", 314 314 __FUNCTION__ , thread->trdid , process->pid , core_lid ); 315 315 … … 328 328 vseg_t * vseg; // stack vseg 329 329 330 thread_dmsg("\n[ INFO] %s : enters\n", __FUNCTION__ );330 thread_dmsg("\n[DMSG] %s : enters\n", __FUNCTION__ ); 331 331 332 332 // allocate a stack from local VMM … … 398 398 } 399 399 400 thread_dmsg("\n[ INFO] %s : exit / thread %x for process %x on core %d in cluster %x\n",400 thread_dmsg("\n[DMSG] %s : exit / thread %x for process %x on core %d in cluster %x\n", 401 401 __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy ); 402 402 … … 416 416 thread_t * thread; // pointer on new thread descriptor 417 417 418 thread_dmsg("\n[ INFO] %s : enter / for type %s on core[%x,%d] / cycle %d\n",418 thread_dmsg("\n[DMSG] %s : enter / for type %s on core[%x,%d] / cycle %d\n", 419 419 __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() ); 420 420 … … 449 449 hal_cpu_context_create( thread ); 450 450 451 thread_dmsg("\n[ INFO] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",451 thread_dmsg("\n[DMSG] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n", 452 452 __FUNCTION__ , thread->trdid , thread_type_str(type) , 453 453 local_cxy , core_lid , hal_time_stamp() ); … … 502 502 core_t * core = thread->core; 503 503 504 thread_dmsg("\n[ INFO] %s : enters for thread %x in process %x / type = %s\n",504 thread_dmsg("\n[DMSG] %s : enters for thread %x in process %x / type = %s\n", 505 505 __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) ); 506 506 … … 556 556 tm_end = hal_get_cycles(); 557 557 558 thread_dmsg("\n[ INFO] %s : exit for thread %x in process %x / duration = %d\n",558 thread_dmsg("\n[DMSG] %s : exit for thread %x in process %x / duration = %d\n", 559 559 __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start ); 560 560 } … … 732 732 while( 1 ) 733 733 { 734 idle_dmsg("\n[ INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",734 idle_dmsg("\n[DMSG] %s : core[%x][%d] goes to sleep at cycle %d\n", 735 735 __FUNCTION__ , local_cxy , lid , hal_get_cycles() ); 736 736 … … 738 738 //hal_core_sleep(); 739 739 740 idle_dmsg("\n[ INFO] %s : core[%x][%d] wake up at cycle %d\n",740 idle_dmsg("\n[DMSG] %s : core[%x][%d] wake up at cycle %d\n", 741 741 __FUNCTION__ , local_cxy , lid , hal_get_cycles() ); 742 742 -
trunk/kernel/kern/thread.h
r367 r406 158 158 * This TRDID is computed by the process_register_thread() function, when the user 159 159 * thread is registered in the local copy of the process descriptor. 160 * 161 * WARNING : Don't modify the first 4 fields order, as this order is used by the 162 * hal_kentry assembly code for the TSAR architecture. 160 163 **************************************************************************************/ 161 164 … … 164 167 typedef struct thread_s 165 168 { 166 void * cpu_uzone; /*! used for exception/interrupt/syscall */167 169 void * cpu_context; /*! used for context switch */ 168 170 void * fpu_context; /*! used for dynamic FPU allocation */ 171 172 intptr_t k_stack_base; /*! kernel stack base address */ 173 uint32_t k_stack_size; /*! kernel stack size (bytes) */ 169 174 170 175 uint32_t trdid; /*! thread index (cxy.ltid) */ … … 189 194 intptr_t u_stack_base; /*! user stack base address */ 190 195 uint32_t u_stack_size; /*! user stack size (bytes) */ 191 intptr_t k_stack_base; /*! kernel stack base address */192 uint32_t k_stack_size; /*! kernel stack size (bytes) */193 196 194 197 void * entry_func; /*! pointer on entry function */ -
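The new WARNING in thread.h states that the order of the first four fields is relied upon by the hal_kentry assembly code. A hedged way to enforce that constraint at compile time (not part of the changeset, and assuming a C11 toolchain) is to add static assertions after the thread_t definition; the expected offsets below are assumptions that would have to match what hal_kentry actually uses.

    #include <stddef.h>

    _Static_assert( offsetof( thread_t, cpu_context )  == 0,
                    "hal_kentry assumes cpu_context is the first field" );
    _Static_assert( offsetof( thread_t, fpu_context )  == sizeof(void *),
                    "hal_kentry assumes fpu_context is the second field" );
    _Static_assert( offsetof( thread_t, k_stack_base ) == 2 * sizeof(void *),
                    "hal_kentry assumes k_stack_base is the third field" );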
trunk/kernel/libk/elf.c
r401 r406 24 24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 #include <hal_special.h> 26 27 #include <hal_uspace.h> 27 28 #include <printk.h> 28 29 #include <process.h> 30 #include <thread.h> 31 #include <mapper.h> 29 32 #include <vseg.h> 30 33 #include <kmem.h> … … 165 168 type = VSEG_TYPE_CODE; 166 169 process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT; 167 168 elf_dmsg("\n[INFO] %s : found CODE vseg / base = %x / size = %x\n",169 __FUNCTION__ , vbase , mem_size );170 170 } 171 171 else // found DATA segment … … 173 173 type = VSEG_TYPE_DATA; 174 174 process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT; 175 176 elf_dmsg("\n[INFO] %s : found DATA vseg / base = %x / size = %x\n",177 __FUNCTION__, vbase , mem_size );178 175 } 179 176 … … 194 191 vfs_file_t * file_ptr = (vfs_file_t *)GET_PTR( file_xp ); 195 192 193 // get local pointer on .elf file mapper 194 mapper_t * mapper_ptr = (mapper_t *)hal_remote_lpt( XPTR( file_cxy , 195 &file_ptr->mapper ) ); 196 196 197 // initialize "file_mapper", "file_offset", "file_size" fields in vseg 197 vseg->mapper_xp = (xptr_t)hal_remote_lwd( XPTR( file_cxy , &file_ptr->mapper ));198 vseg->mapper_xp = XPTR( file_cxy , mapper_ptr ); 198 199 vseg->file_offset = file_offset; 199 200 vseg->file_size = file_size; … … 201 202 // update reference counter in file descriptor 202 203 vfs_file_count_up( file_xp ); 204 205 elf_dmsg("\n[DMSG] %s : found %s vseg / base = %x / size = %x\n" 206 " file_size = %x / file_offset = %x / mapper_xp = %l\n", 207 __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 208 vseg->file_size , vseg->file_offset , vseg->mapper_xp ); 203 209 } 204 210 … … 219 225 error_t error; 220 226 221 elf_dmsg("\n[ INFO] %s : core[%x,%d] enter for <%s>\n",227 elf_dmsg("\n[DMSG] %s : core[%x,%d] enter for <%s>\n", 222 228 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname ); 223 229 … … 239 245 } 240 246 241 elf_dmsg("\n[ INFO] %s : open file <%s>\n", __FUNCTION__ , pathname );247 elf_dmsg("\n[DMSG] %s : open file <%s>\n", __FUNCTION__ , pathname ); 242 248 243 249 // load header in local buffer … … 252 258 } 253 259 254 elf_dmsg("\n[ INFO] %s : loaded elf header for %s\n", __FUNCTION__ , pathname );260 elf_dmsg("\n[DMSG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname ); 255 261 256 262 if( header.e_phnum == 0 ) … … 289 295 } 290 296 291 elf_dmsg("\n[ INFO] %s : segments array allocated for %s\n", __FUNCTION__ , pathname );297 elf_dmsg("\n[DMSG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname ); 292 298 293 299 // load seg descriptors array to local buffer … … 306 312 } 307 313 308 elf_dmsg("\n[ INFO] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname );314 elf_dmsg("\n[DMSG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname ); 309 315 310 316 // register loadable segments in process VMM … … 331 337 kmem_free(&req); 332 338 333 elf_dmsg("\n[ INFO] %s : core[%x,%d] exit for <%s> / entry_point = %x\n",339 elf_dmsg("\n[DMSG] %s : core[%x,%d] exit for <%s> / entry_point = %x\n", 334 340 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname , header.e_entry ); 335 341 -
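The vseg initialisation change in elf.c above replaces a raw 64-bit remote read of the inode's mapper field by an explicit construction of the extended pointer. The point, restated as a two-line sketch with the same calls as the new code: file_ptr->mapper is a local pointer that is only meaningful inside the file's cluster, so the extended pointer must be rebuilt from (cluster, local pointer) rather than loaded as a 64-bit value.

    mapper_t * mapper_ptr = (mapper_t *)hal_remote_lpt( XPTR( file_cxy, &file_ptr->mapper ) );
    vseg->mapper_xp       = XPTR( file_cxy, mapper_ptr );    // correct extended pointer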
trunk/kernel/libk/grdxt.c
r396 r406 26 26 #include <errno.h> 27 27 #include <printk.h> 28 #include <vseg.h> 28 29 #include <kmem.h> 29 30 #include <grdxt.h> … … 125 126 intptr_t value; 126 127 127 printk(" *** %s : n1 = %d / n2 = %d / n3 = %d\n",128 printk("\n***** Generic Radix tree %s : n1 = %d / n2 = %d / n3 = %d\n\n", 128 129 name, 1<<w1 , 1<<w2 , 1<<w3 ); 129 130 -
trunk/kernel/libk/grdxt.h
r1 r406 36 36 * Memory for the second and third levels arrays is dynamically allocated by the 37 37 * grdxt_insert() function and is only released by grdxt_destroy(). 38 * - It used to by the VMM to retrieve a vseg descriptor: key is the virtual address. 39 * - It is used by the mapper to implement the file cache: key is the page index in file. 38 * It is used by the MAPPER to implement the file cache: key is the page index in file. 40 39 ******************************************************************************************/ 41 40 … … 48 47 } 49 48 grdxt_t; 50 51 49 52 50 /******************************************************************************************* … … 122 120 123 121 /******************************************************************************************* 124 * This function displays the c ontent of theradix_tree.122 * This function displays the current content of a radix_tree. 125 123 ******************************************************************************************* 126 124 * @ rt : pointer on the radix-tree descriptor. 127 * @ name : string identifying the radix-tree.125 * @ string : radix tree identifier. 128 126 ******************************************************************************************/ 129 127 void grdxt_print( grdxt_t * rt, 130 char * name);128 char * string ); 131 129 132 130 -
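Since grdxt.h now documents the radix tree as a mapper-only structure (key = page index in file, value = page descriptor), a hedged usage sketch of the API may help; the tree must first be built with grdxt_init( rt , w1 , w2 , w3 ), where 1<<w1, 1<<w2 and 1<<w3 are the sizes of the three levels. Function signatures follow the declarations in this changeset; the wrapper function is illustrative only.

    error_t grdxt_usage_sketch( grdxt_t * rt, uint32_t index, page_t * page )
    {
        if( grdxt_insert( rt , index , page ) ) return ENOMEM;   // register <page> at key <index>

        page_t * found = grdxt_lookup( rt , index );              // NULL if the key is missing
        if( found != page ) return EINVAL;

        grdxt_remove( rt , index );                               // detach it again
        return 0;
    }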
trunk/kernel/mm/kcm.c
r352 r406 47 47 kcm_page_t * kcm_page ) 48 48 { 49 kcm_dmsg("\n[ INFO] %s : enters for %s / page %x / count = %d / active = %d\n",49 kcm_dmsg("\n[DMSG] %s : enters for %s / page %x / count = %d / active = %d\n", 50 50 __FUNCTION__ , kmem_type_str( kcm->type ) , 51 51 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); … … 80 80 + (index * kcm->block_size) ); 81 81 82 kcm_dmsg("\n[ INFO] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n",82 kcm_dmsg("\n[DMSG] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n", 83 83 __FUNCTION__ , kmem_type_str( kcm->type ) , ptr , 84 84 (intptr_t)kcm_page , kcm_page->count ); … … 231 231 kcm->blocks_nr = blocks_nr; 232 232 233 kcm_dmsg("\n[ INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",233 kcm_dmsg("\n[DMSG] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n", 234 234 __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr ); 235 235 } … … 301 301 kcm_page->active = 1; 302 302 303 kcm_dmsg("\n[ INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",303 kcm_dmsg("\n[DMSG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n", 304 304 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() , 305 305 (intptr_t)kcm_page , kcm_page->count ); … … 311 311 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 312 312 313 kcm_dmsg("\n[ INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",313 kcm_dmsg("\n[DMSG] %s : enters for type %s at cycle %d / page = %x / count = %d\n", 314 314 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() , 315 315 (intptr_t)kcm_page , kcm_page->count ); -
trunk/kernel/mm/kmem.c
r394 r406 145 145 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" ); 146 146 147 kmem_dmsg("\n[ INFO] %s : enters / KCM type %s missing in cluster %x\n",147 kmem_dmsg("\n[DMSG] %s : enters / KCM type %s missing in cluster %x\n", 148 148 __FUNCTION__ , kmem_type_str( type ) , local_cxy ); 149 149 … … 169 169 hal_fence(); 170 170 171 kmem_dmsg("\n[ INFO] %s : exit / KCM type %s created in cluster %x\n",171 kmem_dmsg("\n[DMSG] %s : exit / KCM type %s created in cluster %x\n", 172 172 __FUNCTION__ , kmem_type_str( type ) , local_cxy ); 173 173 … … 192 192 assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" ); 193 193 194 kmem_dmsg("\n[ INFO] %s : enters in cluster %x for type %s\n",194 kmem_dmsg("\n[DMSG] %s : enters in cluster %x for type %s\n", 195 195 __FUNCTION__ , local_cxy , kmem_type_str( type ) ); 196 196 … … 210 210 if( flags & AF_ZERO ) page_zero( (page_t *)ptr ); 211 211 212 kmem_dmsg("\n[ INFO] %s : exit in cluster %x for type %s / page = %x / base = %x\n",212 kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / page = %x / base = %x\n", 213 213 __FUNCTION__, local_cxy , kmem_type_str( type ) , 214 214 (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) ); … … 228 228 if( flags & AF_ZERO ) memset( ptr , 0 , size ); 229 229 230 kmem_dmsg("\n[ INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",230 kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / base = %x / size = %d\n", 231 231 __FUNCTION__, local_cxy , kmem_type_str( type ) , 232 232 (intptr_t)ptr , req->size ); … … 255 255 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) ); 256 256 257 kmem_dmsg("\n[ INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",257 kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / base = %x / size = %d\n", 258 258 __FUNCTION__, local_cxy , kmem_type_str( type ) , 259 259 (intptr_t)ptr , kmem_type_size( type ) ); -
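The kmem.c traces above exercise the typed allocation interface: a request descriptor selects the allocator class (whole pages, variable-size blocks, or fixed-size objects). The sketch below is hedged: the type, size and flags fields and the kmem_alloc()/kmem_free() calls appear in this changeset, while the ptr field and the exact meaning of size for KMEM_PAGE are assumptions.

    error_t kmem_usage_sketch( void )
    {
        kmem_req_t req;

        req.type  = KMEM_PAGE;          // allocator class: physical page(s) from the PPM
        req.size  = 0;                  // assumed: order (log2 of the number of pages)
        req.flags = AF_ZERO;            // zero the allocated memory

        page_t * page = kmem_alloc( &req );
        if( page == NULL ) return ENOMEM;

        req.ptr = page;                 // assumed: release through the same descriptor
        kmem_free( &req );
        return 0;
    }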
trunk/kernel/mm/mapper.c
r367 r406 143 143 error_t error; 144 144 145 mapper_dmsg("\n[ INFO] %s : enters for page %d inmapper %x\n",145 mapper_dmsg("\n[DMSG] %s : enters for page %d / mapper %x\n", 146 146 __FUNCTION__ , index , mapper ); 147 147 … … 170 170 if ( page == NULL ) // missing page => create it and load it from file system 171 171 { 172 mapper_dmsg("\n[ INFO] %s : missing page => load from device\n", __FUNCTION__ );172 mapper_dmsg("\n[DMSG] %s : missing page => load from device\n", __FUNCTION__ ); 173 173 174 174 // allocate one page from PPM … … 212 212 } 213 213 214 // update the mapper and index fields in page descriptor215 // required by the vfs_move_page_to_mapper()216 page->mapper = mapper;217 page->index = index;218 219 214 // launch I/O operation to load page from file system 220 215 error = vfs_mapper_move_page( page, … … 259 254 } 260 255 261 mapper_dmsg("\n[ INFO] %s : exit for page %d inmapper %x / page_desc = %x\n",256 mapper_dmsg("\n[DMSG] %s : exit for page %d / mapper %x / page_desc = %x\n", 262 257 __FUNCTION__ , index , mapper , page ); 263 258 … … 315 310 uint8_t * buf_ptr; // current buffer address 316 311 317 mapper_dmsg("\n[ INFO] %s : enters / to_buf = %d / buffer = %x\n",312 mapper_dmsg("\n[DMSG] %s : enters / to_buf = %d / buffer = %x\n", 318 313 __FUNCTION__ , to_buffer , buffer ); 319 314 … … 341 336 else page_count = CONFIG_PPM_PAGE_SIZE; 342 337 343 mapper_dmsg("\n[ INFO] %s : index = %d / offset = %d / count = %d\n",338 mapper_dmsg("\n[DMSG] %s : index = %d / offset = %d / count = %d\n", 344 339 __FUNCTION__ , index , page_offset , page_count ); 345 340 … … 356 351 buf_ptr = (uint8_t *)buffer + done; 357 352 358 mapper_dmsg("\n[ INFO] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",353 mapper_dmsg("\n[DMSG] %s : index = %d / buf_ptr = %x / map_ptr = %x\n", 359 354 __FUNCTION__ , index , buf_ptr , map_ptr ); 360 355 … … 373 368 } 374 369 375 mapper_dmsg("\n[ INFO] %s : exit for buffer %x\n",370 mapper_dmsg("\n[DMSG] %s : exit for buffer %x\n", 376 371 __FUNCTION__, buffer ); 377 372 … … 398 393 cxy_t dst_cxy; // destination cluster 399 394 400 mapper_dmsg("\n[INFO] %s : enters / to_buf = %d / buffer = %l / size = %x / cycle %d\n", 401 __FUNCTION__ , to_buffer , buffer_xp , size , hal_time_stamp() ); 395 // get buffer cluster and local pointer 396 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 397 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp ); 398 399 mapper_dmsg("\n[DMSG] %s : to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n", 400 __FUNCTION__ , to_buffer , buffer_cxy , buffer_ptr , size ); 402 401 403 402 // compute offsets of first and last bytes in file … … 409 408 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT; 410 409 411 // get buffer cluster and local pointer 412 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 413 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp ); 410 mapper_dmsg("\n[DMSG] %s : first_page = %d / last_page = %d\n", 411 __FUNCTION__ , first , last ); 414 412 415 413 // compute source and destination clusters … … 440 438 else page_count = CONFIG_PPM_PAGE_SIZE; 441 439 442 mapper_dmsg("\n[ INFO] %s : page_index = %d / offset = %d / count= %d\n",440 mapper_dmsg("\n[DMSG] %s : page_index = %d / offset = %d / bytes = %d\n", 443 441 __FUNCTION__ , index , page_offset , page_count ); 444 442 … … 466 464 } 467 465 468 mapper_dmsg("\n[INFO] %s : index = %d\n", __FUNCTION__ , index );469 470 466 // move fragment 471 467 hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count ); … … 474 470 } 475 471 476 mapper_dmsg("\n[ 
INFO] %s : exit for buffer %l / size = %x / cycle %d\n",477 __FUNCTION__ , buffer_xp , size , hal_time_stamp());472 mapper_dmsg("\n[DMSG] %s : exit / buf_cxy = %x / buf_ptr = %x / size = %x\n", 473 __FUNCTION__ , buffer_cxy , buffer_ptr , size ); 478 474 479 475 return 0; 480 476 481 } // end mapper_move_kernel _buffer()482 477 } // end mapper_move_kernel() 478 -
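The mapper_move_user()/mapper_move_kernel() changes above compute, for each page touched by a transfer, a page_offset and a page_count. The standalone program below illustrates that decomposition of a [file_offset, file_offset + size) range into per-page fragments; the offsets and sizes are examples only, and the formulas are a compact restatement rather than a copy of the kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1 << PAGE_SHIFT)

    int main( void )
    {
        uint32_t file_offset = 0x1F00;                 // first byte in file
        uint32_t size        = 0x0300;                 // number of bytes to move
        uint32_t min_byte    = file_offset;
        uint32_t max_byte    = file_offset + size - 1;
        uint32_t first       = min_byte >> PAGE_SHIFT; // first page index : 1
        uint32_t last        = max_byte >> PAGE_SHIFT; // last page index  : 2

        for( uint32_t index = first ; index <= last ; index++ )
        {
            uint32_t page_offset = (index == first) ? (min_byte & (PAGE_SIZE - 1)) : 0;
            uint32_t page_count  = (index == last)  ? (max_byte & (PAGE_SIZE - 1)) - page_offset + 1
                                                    : PAGE_SIZE - page_offset;
            printf( "page %u : offset = 0x%x / count = 0x%x\n",
                    (unsigned)index, (unsigned)page_offset, (unsigned)page_count );
        }
        return 0;
    }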
trunk/kernel/mm/ppm.c
r315 r406 56 56 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 57 57 58 59 60 58 void * base_ptr = ppm->vaddr_base + 59 ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT); 60 61 61 return XPTR( page_cxy , base_ptr ); 62 62 … … 203 203 assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" ); 204 204 205 page_t * block = NULL; 206 207 ppm_dmsg("\n[ INFO] %s : enters / order = %d\n",205 page_t * block = NULL; 206 207 ppm_dmsg("\n[DMSG] %s : enters / order = %d\n", 208 208 __FUNCTION__ , order ); 209 209 … … 256 256 spinlock_unlock( &ppm->free_lock ); 257 257 258 ppm_dmsg("\n[ INFO] %s : base = %x / order = %d\n",258 ppm_dmsg("\n[DMSG] %s : base = %x / order = %d\n", 259 259 __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order ); 260 260 -
trunk/kernel/mm/vmm.c
r401 r406 62 62 intptr_t size; 63 63 64 vmm_dmsg("\n[ INFO] %s : enter for process %x\n", __FUNCTION__ , process->pid );64 vmm_dmsg("\n[DMSG] %s : enter for process %x\n", __FUNCTION__ , process->pid ); 65 65 66 66 // get pointer on VMM … … 83 83 vmm->vsegs_nr = 0; 84 84 list_root_init( &vmm->vsegs_root ); 85 error = grdxt_init( &vmm->grdxt,86 CONFIG_VMM_GRDXT_W1,87 CONFIG_VMM_GRDXT_W2,88 CONFIG_VMM_GRDXT_W3 );89 90 assert( (error == 0) , __FUNCTION__ , "cannot initialize radix tree\n" );91 85 92 86 // register kentry vseg in VMM 93 base = 1<< CONFIG_PPM_PAGE_SHIFT;87 base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT; 94 88 size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT; 89 95 90 vseg_kentry = vmm_create_vseg( process , base , size , VSEG_TYPE_CODE ); 96 91 97 92 assert( (vseg_kentry != NULL) , __FUNCTION__ , "cannot register kentry vseg\n" ); 98 93 99 vmm->kent_vpn_base = 1; 100 101 // register the args vseg in VMM 102 base = (CONFIG_VMM_KENTRY_SIZE + 1 )<<CONFIG_PPM_PAGE_SHIFT; 94 vmm->kent_vpn_base = base; 95 96 // register args vseg in VMM 97 base = (CONFIG_VMM_KENTRY_BASE + 98 CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT; 103 99 size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT; 100 104 101 vseg_args = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA ); 105 102 106 103 assert( (vseg_args != NULL) , __FUNCTION__ , "cannot register args vseg\n" ); 107 104 108 vmm->args_vpn_base = CONFIG_VMM_KENTRY_SIZE + 1;105 vmm->args_vpn_base = base; 109 106 110 107 // register the envs vseg in VMM 111 base = (CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + 1 )<<CONFIG_PPM_PAGE_SHIFT; 108 base = (CONFIG_VMM_KENTRY_BASE + 109 CONFIG_VMM_KENTRY_SIZE + 110 CONFIG_VMM_ARGS_SIZE ) << CONFIG_PPM_PAGE_SHIFT; 112 111 size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT; 112 113 113 vseg_envs = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA ); 114 114 115 115 assert( (vseg_envs != NULL) , __FUNCTION__ , "cannot register envs vseg\n" ); 116 116 117 vmm->envs_vpn_base = CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + 1;117 vmm->envs_vpn_base = base; 118 118 119 119 // register the heap vseg in VMM 120 120 base = CONFIG_VMM_HEAP_BASE << CONFIG_PPM_PAGE_SHIFT; 121 121 size = (CONFIG_VMM_MMAP_BASE-CONFIG_VMM_HEAP_BASE) << CONFIG_PPM_PAGE_SHIFT; 122 122 123 vseg_heap = vmm_create_vseg( process , base , size , VSEG_TYPE_HEAP ); 123 124 124 125 assert( (vseg_heap != NULL) , __FUNCTION__ , "cannot register heap vseg\n" ); 125 126 126 vmm->heap_vpn_base = CONFIG_VMM_HEAP_BASE;127 vmm->heap_vpn_base = base; 127 128 128 129 // initialize generic page table … … 149 150 hal_fence(); 150 151 151 vmm_dmsg("\n[INFO] %s : exit for process %x\n", __FUNCTION__ , process->pid ); 152 vmm_dmsg("\n[DMSG] %s : exit for process %x / entry_point = %x\n", 153 __FUNCTION__ , process->pid , process->vmm.entry_point ); 152 154 153 155 } // end vmm_init() … … 171 173 dst_vmm->vsegs_nr = 0; 172 174 list_root_init( &dst_vmm->vsegs_root ); 173 error = grdxt_init( &dst_vmm->grdxt,174 CONFIG_VMM_GRDXT_W1,175 CONFIG_VMM_GRDXT_W2,176 CONFIG_VMM_GRDXT_W3 );177 if( error )178 {179 printk("\n[ERROR] in %s : cannot initialize radix tree for process %x\n",180 __FUNCTION__ , dst_process->pid );181 return ENOMEM;182 }183 175 184 176 // loop on src_vmm list of vsegs to create … … 292 284 vseg_free( vseg ); 293 285 } 294 295 // delete vsegs radix_tree296 grdxt_destroy( &vmm->grdxt );297 286 298 287 // release lock … … 456 445 vmm_t * vmm = &process->vmm; 457 446 458 vmm_dmsg("\n[ INFO] %s : enter for process %x / base = 
%x / size = %x / type = %s\n",447 vmm_dmsg("\n[DMSG] %s : enter for process %x / base = %x / size = %x / type = %s\n", 459 448 __FUNCTION__ , process->pid , base , size , vseg_type_str(type) ); 460 449 … … 527 516 528 517 // update "heap_vseg" in VMM 529 process->vmm.heap_vseg = vseg;518 if( type == VSEG_TYPE_HEAP ) process->vmm.heap_vseg = vseg; 530 519 531 520 // attach vseg to vmm … … 534 523 rwlock_wr_unlock( &vmm->vsegs_lock ); 535 524 536 vmm_dmsg("\n[ INFO] %s : exit for process %x / vseg [%x, %x] has been mapped\n",525 vmm_dmsg("\n[DMSG] %s : exit for process %x / vseg [%x, %x] registered\n", 537 526 __FUNCTION__ , process->pid , vseg->min , vseg->max ); 538 527 539 528 return vseg; 540 } 529 530 } // vmm_create_vseg() 541 531 542 532 ///////////////////////////////////// … … 665 655 } 666 656 657 /////////////////////////////////////////////////////////////////////////////////////// 658 // This low-level static function is called by the vmm_get_vseg() and vmm_resize_vseg() 659 // functions. It scan the list of registered vsegs to find the unique vseg containing 660 // a given virtual address. 661 /////////////////////////////////////////////////////////////////////////////////////// 662 // @ vmm : pointer on the process VMM. 663 // @ vaddr : virtual address. 664 // @ return vseg pointer if success / return NULL if not found. 665 /////////////////////////////////////////////////////////////////////////////////////// 666 static vseg_t * vseg_from_vaddr( vmm_t * vmm, 667 intptr_t vaddr ) 668 { 669 list_entry_t * iter; 670 vseg_t * vseg = NULL; 671 672 // get lock protecting the vseg list 673 rwlock_rd_lock( &vmm->vsegs_lock ); 674 675 // scan the list of vsegs 676 LIST_FOREACH( &vmm->vsegs_root , iter ) 677 { 678 vseg = LIST_ELEMENT( iter , vseg_t , list ); 679 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) break; 680 } 681 682 // release the lock 683 rwlock_rd_unlock( &vmm->vsegs_lock ); 684 685 return vseg; 686 } 687 667 688 ///////////////////////////////////////////// 668 689 error_t vmm_resize_vseg( process_t * process, … … 670 691 intptr_t size ) 671 692 { 672 error_t error; 693 error_t error; 694 vseg_t * new; 695 vpn_t vpn_min; 696 vpn_t vpn_max; 673 697 674 698 // get pointer on process VMM … … 677 701 intptr_t addr_min = base; 678 702 intptr_t addr_max = base + size; 679 uint32_t shift = CONFIG_PPM_PAGE_SHIFT;680 703 681 704 // get pointer on vseg 682 vseg_t * vseg = grdxt_lookup( &vmm->grdxt , (uint32_t)(base >> shift));705 vseg_t * vseg = vseg_from_vaddr( vmm , base ); 683 706 684 707 if( vseg == NULL) return EINVAL; … … 696 719 error = 0; 697 720 } 698 else if( vseg->min == addr_min ) // vseg must be resized 699 { 700 panic("resize not implemented yet"); 701 error = 0; 702 } 703 else if( vseg->max == addr_max ) // vseg must be resized 704 { 705 panic("resize not implemented yet"); 706 error = 0; 707 } 708 else // vseg cut in three regions => vseg must be resized & new vseg created 709 { 710 panic("resize not implemented yet"); 711 error = 0; 721 else if( vseg->min == addr_min ) // vseg must be resized 722 { 723 // update vseg base address 724 vseg->min = addr_max; 725 726 // update vpn_base and vpn_size 727 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 728 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 729 vseg->vpn_base = vpn_min; 730 vseg->vpn_size = vpn_max - vpn_min + 1; 731 error = 0; 732 } 733 else if( vseg->max == addr_max ) // vseg must be resized 734 { 735 // update vseg max address 736 vseg->max = addr_min; 737 738 // update vpn_base and vpn_size 739 
vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 740 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 741 vseg->vpn_base = vpn_min; 742 vseg->vpn_size = vpn_max - vpn_min + 1; 743 error = 0; 744 } 745 else // vseg cut in three regions 746 { 747 // resize existing vseg 748 vseg->max = addr_min; 749 750 // update vpn_base and vpn_size 751 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 752 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 753 vseg->vpn_base = vpn_min; 754 vseg->vpn_size = vpn_max - vpn_min + 1; 755 756 // create new vseg 757 new = vmm_create_vseg( process , addr_min , (vseg->max - addr_max) , vseg->type ); 758 if( new == NULL ) error = EINVAL; 759 else error = 0; 712 760 } 713 761 … … 716 764 717 765 return error; 718 } 766 767 } // vmm_resize_vseg() 719 768 720 769 /////////////////////////////////////////// … … 723 772 vseg_t ** found_vseg ) 724 773 { 725 vmm_t * vmm; 726 vseg_t * vseg; 727 728 // get pointer on process VMM 729 vmm = &process->vmm; 730 731 // get lock protecting the vseg list 732 rwlock_rd_lock( &vmm->vsegs_lock ); 733 734 // get pointer on vseg from local radix tree 735 vseg = grdxt_lookup( &vmm->grdxt, (uint32_t)(vaddr >> CONFIG_PPM_PAGE_SHIFT) ); 736 737 // release the lock 738 rwlock_rd_unlock( &vmm->vsegs_lock ); 774 vmm_t * vmm = &process->vmm; 775 776 // get vseg from vaddr 777 vseg_t * vseg = vseg_from_vaddr( vmm , vaddr ); 739 778 740 779 if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref … … 752 791 xptr_t vseg_xp; 753 792 error_t error; 793 754 794 rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); 755 795 … … 759 799 vseg = vseg_alloc(); 760 800 761 if( vseg == NULL ) panic("no memory for vseg copy in cluster %x", local_cxy );801 if( vseg == NULL ) return -1; 762 802 763 803 // initialise local vseg from reference … … 765 805 766 806 // register local vseg in local VMM 767 error = vseg_attach( &process->vmm , vseg ); 768 769 if( error ) panic("no memory for vseg registration in cluster %x", local_cxy ); 807 vseg_attach( &process->vmm , vseg ); 770 808 } 771 809 … … 784 822 cxy_t page_cxy; // physical page cluster 785 823 page_t * page_ptr; // local pointer on physical page descriptor 786 787 uint32_t type = vseg->type;788 xptr_t mapper_xp = vseg->mapper_xp; 789 uint32_t flags = vseg->flags;790 791 // get mapper cluster and local pointer 792 cxy_t mapper_cxy = GET_CXY( mapper_xp );793 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp);794 795 // FILE type : simplyget the physical page from the file mapper824 uint32_t index; // missing page index in vseg mapper 825 uint32_t type; // vseg type; 826 827 type = vseg->type; 828 index = vpn - vseg->vpn_base; 829 830 vmm_dmsg("\n[DMSG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n", 831 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index ); 832 833 // FILE type : get the physical page from the file mapper 796 834 if( type == VSEG_TYPE_FILE ) 797 835 { 798 // compute index in file mapper 799 uint32_t index = vpn - vseg->vpn_base; 836 // get extended pointer on mapper 837 xptr_t mapper_xp = vseg->mapper_xp; 838 839 assert( (mapper_xp != XPTR_NULL), __FUNCTION__, 840 "mapper not defined for a FILE vseg\n" ); 841 842 // get mapper cluster and local pointer 843 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 844 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp ); 800 845 801 846 // get page descriptor from mapper … … 814 859 } 815 860 816 // all other types : allocate a physical page from target 
cluster,861 // Other types : allocate a physical page from target cluster, 817 862 else 818 863 { 864 uint32_t flags = vseg->flags; 865 819 866 // get target cluster for physical page 820 867 if( flags & VSEG_DISTRIB ) // depends on VPN LSB 821 868 { 822 uint32_t x_ width = LOCAL_CLUSTER->x_width;823 uint32_t y_ width = LOCAL_CLUSTER->y_width;824 page_cxy = vpn & (( 1<<(x_width + y_width)) - 1);869 uint32_t x_size = LOCAL_CLUSTER->x_size; 870 uint32_t y_size = LOCAL_CLUSTER->y_size; 871 page_cxy = vpn & ((x_size * y_size) - 1); 825 872 } 826 873 else // defined in vseg descriptor … … 831 878 // allocate a physical page in target cluster 832 879 kmem_req_t req; 833 if( page_cxy == local_cxy ) 880 if( page_cxy == local_cxy ) // target cluster is the local cluster 834 881 { 835 882 req.type = KMEM_PAGE; … … 845 892 if( page_ptr == NULL ) return ENOMEM; 846 893 847 // initialise page from .elf file mapper for DATA and CODE types 894 // initialise missing page from .elf file mapper for DATA and CODE types 895 // => the mapper_xp field is an extended pointer on the .elf file mapper 848 896 if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) 849 897 { 850 // compute missing page index in vseg 851 vpn_t page_index = vpn - vseg->vpn_base; 898 // get extended pointer on mapper 899 xptr_t mapper_xp = vseg->mapper_xp; 900 901 assert( (mapper_xp != XPTR_NULL), __FUNCTION__, 902 "mapper not defined for a CODE or DATA vseg\n" ); 903 904 // get mapper cluster and local pointer 905 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 906 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp ); 907 908 // compute missing page offset in vseg 909 uint32_t offset = index << CONFIG_PPM_PAGE_SHIFT; 852 910 853 911 // compute missing page offset in .elf file 854 intptr_t page_offset = vseg->file_offset + 855 (page_index << CONFIG_PPM_PAGE_SHIFT); 856 857 // compute extended pointer on page first byte 858 xptr_t base_xp = ppm_page2base( XPTR( page_cxy , page_ptr ) ); 859 860 // file_size can be smaller than vseg_size for BSS 861 intptr_t file_size = vseg->file_size; 862 863 if( file_size < page_offset ) // fully in BSS 912 uint32_t elf_offset = vseg->file_offset + offset; 913 914 vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n", 915 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset ); 916 917 // compute extended pointer on page base 918 xptr_t base_xp = ppm_page2base( XPTR( page_cxy , page_ptr ) ); 919 920 // file_size (in .elf mapper) can be smaller than vseg_size (BSS) 921 uint32_t file_size = vseg->file_size; 922 923 if( file_size < offset ) // missing page fully in BSS 864 924 { 925 vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / fully in BSS\n", 926 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn ); 927 865 928 if( page_cxy == local_cxy ) 866 929 { … … 872 935 } 873 936 } 874 else if( file_size >= ( page_offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper937 else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper 875 938 { 939 vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / fully in mapper\n", 940 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn ); 941 876 942 if( mapper_cxy == local_cxy ) 877 943 { 878 944 error = mapper_move_kernel( mapper_ptr, 879 945 true, // to_buffer 880 page_offset,946 elf_offset, 881 947 base_xp, 882 948 CONFIG_PPM_PAGE_SIZE ); … … 888 954 true, // to buffer 889 955 false, // kernel buffer 890 page_offset,891 (uint64_t)base_xp,956 elf_offset, 957 base_xp, 892 958 CONFIG_PPM_PAGE_SIZE, 893 959 &error ); … … 895 
961 if( error ) return EINVAL; 896 962 } 897 else // in mapper : from page_offset -> (file_size - page_offset) 898 // in BSS : from file_size -> (page_offset + page_size) 963 else // both in mapper and in BSS : 964 // - (file_size - offset) bytes from mapper 965 // - (page_size + offset - file_size) bytes from BSS 899 966 { 967 vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n" 968 " %d bytes from mapper / %d bytes from BSS\n", 969 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, 970 file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); 971 900 972 // initialize mapper part 901 973 if( mapper_cxy == local_cxy ) … … 903 975 error = mapper_move_kernel( mapper_ptr, 904 976 true, // to buffer 905 page_offset,977 elf_offset, 906 978 base_xp, 907 file_size - page_offset );979 file_size - offset ); 908 980 } 909 981 else … … 913 985 true, // to buffer 914 986 false, // kernel buffer 915 page_offset,916 (uint64_t)base_xp,917 file_size - page_offset,987 elf_offset, 988 base_xp, 989 file_size - offset, 918 990 &error ); 919 991 } … … 923 995 if( page_cxy == local_cxy ) 924 996 { 925 memset( GET_PTR( base_xp ) + file_size - page_offset , 0 ,926 page_offset + CONFIG_PPM_PAGE_SIZE - file_size );997 memset( GET_PTR( base_xp ) + file_size - offset , 0 , 998 offset + CONFIG_PPM_PAGE_SIZE - file_size ); 927 999 } 928 1000 else 929 1001 { 930 hal_remote_memset( base_xp + file_size - page_offset , 0 ,931 page_offset + CONFIG_PPM_PAGE_SIZE - file_size );1002 hal_remote_memset( base_xp + file_size - offset , 0 , 1003 offset + CONFIG_PPM_PAGE_SIZE - file_size ); 932 1004 } 933 1005 } … … 937 1009 // return ppn 938 1010 *ppn = ppm_page2ppn( XPTR( page_cxy , page_ptr ) ); 1011 1012 vmm_dmsg("\n[DMSG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n", 1013 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn ); 1014 939 1015 return 0; 940 1016 … … 954 1030 // this function must be called by a thread running in the reference cluster 955 1031 assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ , 956 " not called in the reference cluster\n" ); 1032 "not called in the reference cluster\n" ); 1033 1034 vmm_dmsg("\n[DMSG] %s : core[%x,%d] enter for vpn = %x in process %x\n", 1035 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid ); 957 1036 958 1037 // get VMM pointer … … 968 1047 if( (attr & GPT_MAPPED) == 0 ) 969 1048 { 1049 vmm_dmsg("\n[DMSG] %s : core[%x,%d] page %x unmapped => try to map it\n", 1050 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 1051 970 1052 // 1. get vseg pointer 971 1053 error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg ); … … 977 1059 return error; 978 1060 } 1061 1062 vmm_dmsg("\n[DMSG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n", 1063 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , 1064 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size ); 979 1065 980 1066 // 2. get physical page number, depending on vseg type … … 1005 1091 } // end new PTE 1006 1092 1093 vmm_dmsg("\n[DMSG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n", 1094 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , ppn ); 1095 1007 1096 *ret_ppn = ppn; 1008 1097 *ret_attr = attr; -
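The new vmm_resize_vseg() code above recomputes vpn_base and vpn_size each time vseg->min or vseg->max is moved. The standalone snippet below reproduces that arithmetic on example addresses, to make explicit that vpn_max is the index of the last page actually covered (hence the -1 before the shift).

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef uint32_t vpn_t;

    int main( void )
    {
        uint32_t min = 0x00401000;                       // new vseg base (page aligned)
        uint32_t max = 0x00404000;                       // new vseg end (excluded)

        vpn_t vpn_min  = min >> PAGE_SHIFT;              // 0x401
        vpn_t vpn_max  = (max - 1) >> PAGE_SHIFT;        // 0x403 : last covered page
        vpn_t vpn_base = vpn_min;
        vpn_t vpn_size = vpn_max - vpn_min + 1;          // 3 pages

        printf( "vpn_base = 0x%x / vpn_size = 0x%x\n", (unsigned)vpn_base, (unsigned)vpn_size );
        return 0;
    }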
trunk/kernel/mm/vmm.h
r401 r406 30 30 #include <bits.h> 31 31 #include <list.h> 32 #include <grdxt.h>33 32 #include <spinlock.h> 34 33 #include <hal_gpt.h> … … 92 91 * This structure defines the Virtual Memory Manager for a given process in a given cluster. 93 92 * This local VMM provides three main services: 94 * 1) It registers all vsegs statically or dynamically defined in the vseg list, 95 * and in the associated radix-tree. 96 * 2) It allocates virtual memory space for the STACKS and MMAP vsegs, 97 * using dedicated allocators. 93 * 1) It registers all vsegs statically or dynamically defined in the vseg list. 94 * 2) It allocates virtual memory space for the STACKS and MMAP vsegs. 98 95 * 3) It contains the local copy of the generic page table descriptor. 99 96 ********************************************************************************************/ … … 104 101 list_entry_t vsegs_root; /*! all vsegs in same process and same cluster */ 105 102 uint32_t vsegs_nr; /*! total number of local vsegs */ 106 grdxt_t grdxt; /*! embedded generic vsegs radix tree (key is vpn) */107 103 108 104 gpt_t gpt; /*! embedded generic page table descriptor */ … … 144 140 145 141 /********************************************************************************************* 146 * This function initialises the virtual memory manager attached to a process. 147 * - It initializes the VSL (list of vsegs and associated radix tree). 148 * - It initializes the generic page table (empty). 142 * This function initialises the virtual memory manager attached to an user process. 143 * - It registers the "kentry", "args", "envs" and "heap" vsegs in the vsegs list. 144 * The "code" and "data" vsegs are registered by the elf_load_process() function, 145 * the "stack" vsegs are registered by the thread_user_create() function, and the 146 * "mmap" vsegs are dynamically created by syscalls. 147 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function. 148 * For TSAR it map all pages for the "kentry" vseg, that must be identity mapping. 149 149 * - It initializes the STAK and MMAP allocators. 150 * - It registers the "kentry", "args", "envs" and "heap" vsegs in the vsegs list. 151 * Any error in this function gives a kernel panic. 150 * TODO : Any error in this function gives a kernel panic => improve error handling. 152 151 ********************************************************************************************* 153 152 * @ process : pointer on process descriptor … … 249 248 * (a) if the region is not entirely mapped in an existing vseg, it's an error. 250 249 * (b) if the region has same base and size as an existing vseg, the vseg is removed. 251 * (c) if the removed region cut the vseg in two parts, it is removed and re-created.252 * (d) if the removed region cut the vseg in three parts, it is removed, and two are created.253 * TODO : cases (c) and (d) are not implemented [AG]250 * (c) if the removed region cut the vseg in two parts, it is modified. 251 * (d) if the removed region cut the vseg in three parts, it is modified, and a new 252 * vseg is created with same type. 254 253 ********************************************************************************************* 255 254 * @ process : pointer on process descriptor … … 267 266 * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster, 268 267 * register it in local VMM and returns the local vseg pointer, if success. 269 * - if the vseg is missing in reference VMM, it returns an user error. 
270 * It creates a kernel panic if there is not enough memory to create a new vseg descriptor 271 * in the cluster containing the calling thread. 268 * - it returns an user error if the vseg is missing in the reference VMM, or if there is 269 * not enough memory for a new vseg descriptor in cluster containing the calling thread. 272 270 ********************************************************************************************* 273 271 * @ process : [in] pointer on process descriptor … … 320 318 * Depending on the vseg type, defined by the <vseg> argument, it returns the PPN 321 319 * (Physical Page Number) associated to a missing page defined by the <vpn> argument. 322 * - For the VSEG_TYPE_FILE, it returns the physical page from the file mapper. 323 * For all other types, it allocates a new physical page from the cluster defined 324 * by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg. 325 * - For the VSEG_TYPE_CODE and VSEG_TYPE_DATA types, the allocated page is initialized 326 * from the .elf file mapper. For others vseg types it is not initialised. 320 * - For the FILE type, it returns directly the physical page from the file mapper. 321 * - For the CODE and DATA types, it allocates a new phsical page from the cluster defined 322 * by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg, 323 * and initialize this page from the .elf file mapper. 324 * - For all other types, it allocates a new physical page from the cluster defined 325 * by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg, 326 * but the new page is not initialized. 327 327 ********************************************************************************************* 328 328 * @ vseg : local pointer on vseg containing the mising page. -
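The updated vmm_get_one_ppn() description above distinguishes three initialisation cases for a missing CODE or DATA page, depending on where the page falls relative to the vseg file_size: entirely in the BSS, entirely in the .elf mapper, or straddling the boundary. The standalone snippet below restates that case analysis with example numbers; the comparison operators mirror the new vmm.c code.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1 << PAGE_SHIFT)

    int main( void )
    {
        uint32_t file_size = 0x2A00;                       // initialised bytes in the vseg
        uint32_t index     = 2;                            // missing page index in vseg
        uint32_t offset    = index << PAGE_SHIFT;          // 0x2000 : page offset in vseg

        if( file_size < offset )                           // page fully in BSS
            printf( "zero the whole page\n" );
        else if( file_size >= offset + PAGE_SIZE )         // page fully in mapper
            printf( "copy 0x%x bytes from the mapper\n", (unsigned)PAGE_SIZE );
        else                                               // page straddles file_size
            printf( "copy 0x%x bytes from the mapper, zero the last 0x%x bytes\n",
                    (unsigned)(file_size - offset),
                    (unsigned)(offset + PAGE_SIZE - file_size) );
        return 0;
    }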
trunk/kernel/mm/vseg.c
r394 r406 188 188 189 189 /////////////////////////////// 190 error_t vseg_attach( vmm_t * vmm, 191 vseg_t * vseg ) 192 { 193 // add vseg in radix-tree 194 error_t error = grdxt_insert( &vmm->grdxt , vseg->vpn_base , vseg ); 195 if ( error ) return ENOMEM; 196 190 void vseg_attach( vmm_t * vmm, 191 vseg_t * vseg ) 192 { 197 193 // update vseg descriptor 198 194 vseg->vmm = vmm; … … 200 196 // add vseg in vmm list 201 197 list_add_last( &vmm->vsegs_root , &vseg->list ); 202 203 return 0;204 198 } 205 199 … … 208 202 vseg_t * vseg ) 209 203 { 210 // remove vseg from radix-tree211 grdxt_remove( &vmm->grdxt , vseg->vpn_base );212 213 204 // update vseg descriptor 214 205 vseg->vmm = NULL; -
trunk/kernel/mm/vseg.h
r388 r406 149 149 * @ vmm : pointer on the VMM 150 150 * @ vseg : pointer on the vseg descriptor 151 * @ returns 0 if success / returns ENOMEM if registration in GRDXT unpossible.152 151 *********************************************************************************************/ 153 error_tvseg_attach( struct vmm_s * vmm,154 152 void vseg_attach( struct vmm_s * vmm, 153 vseg_t * vseg ); 155 154 156 155 /********************************************************************************************** -
trunk/kernel/syscalls/sys_exec.c
r315 r406 223 223 bool_t is_local = (cxy_server == cxy_client); 224 224 225 exec_dmsg("\n[ INFO] %s starts for process %x on core %d in cluster %x"225 exec_dmsg("\n[DMSG] %s starts for process %x on core %d in cluster %x" 226 226 " / target_cluster = %x / cycle %d\n", 227 227 __FUNCTION__, process->pid , CURRENT_CORE->lid, … … 261 261 } 262 262 263 exec_dmsg("\n[ INFO] %s starts exec for process %x at cycle %d\n",263 exec_dmsg("\n[DMSG] %s starts exec for process %x at cycle %d\n", 264 264 __FUNCTION__, process->pid, hal_get_cycles() ); 265 265 … … 275 275 } 276 276 277 exec_dmsg("\n[ INFO] %s completes exec for process %x at cycle %d\n",277 exec_dmsg("\n[DMSG] %s completes exec for process %x at cycle %d\n", 278 278 __FUNCTION__, process->pid , hal_get_cycles() ); 279 279 -
trunk/kernel/syscalls/sys_fork.c
r101 r406 63 63 } 64 64 65 fork_dmsg("\n[ INFO] %s : enters for process %d at cycle [%d]\n",65 fork_dmsg("\n[DMSG] %s : enters for process %d at cycle [%d]\n", 66 66 __FUNCTION__, parent_process->pid, hal_get_cycles()); 67 67 … … 71 71 { 72 72 hal_fpu_context_save( parent_thread ); 73 fork_dmsg("\n[ INFO] %s : save FPU\n", __FUNCTION__);73 fork_dmsg("\n[DMSG] %s : save FPU\n", __FUNCTION__); 74 74 } 75 75 … … 129 129 process_reference_init( child_process , child_pid , parent_pid ); 130 130 131 fork_dmsg("\n[ INFO] : %s created child process : pid = %x / ppid = %x\n",131 fork_dmsg("\n[DMSG] : %s created child process : pid = %x / ppid = %x\n", 132 132 __FUNCTION__, child_pid , parent_pid ); 133 133 … … 148 148 XPTR( local_cxy , &parent_process->fd_array ) ); 149 149 150 fork_dmsg("\n[ INFO] %s : duplicated child process from parent process\n",150 fork_dmsg("\n[DMSG] %s : duplicated child process from parent process\n", 151 151 __FUNCTION__ ); 152 152 … … 162 162 } 163 163 164 fork_dmsg("\n[ INFO] %s : parent vmm duplicated in child process\n", __FUNCTION__ );164 fork_dmsg("\n[DMSG] %s : parent vmm duplicated in child process\n", __FUNCTION__ ); 165 165 166 166 // create child main thread descriptor in local cluster … … 197 197 child_thread->trdid = child_trdid; 198 198 199 fork_dmsg("\n[ INFO] %s : initialised child main thread\n", __FUNCTION__ );199 fork_dmsg("\n[DMSG] %s : initialised child main thread\n", __FUNCTION__ ); 200 200 201 201 // register local child thread into local child process th_tbl[] … … 208 208 sched_register_thread( child_thread->core , child_thread ); 209 209 210 fork_dmsg("\n[ INFO] %s : registered main thread in scheduler\n", __FUNCTION__);210 fork_dmsg("\n[DMSG] %s : registered main thread in scheduler\n", __FUNCTION__); 211 211 212 212 // update DQDT for the child thread 213 213 dqdt_local_update_threads( 1 ); 214 214 215 fork_dmsg("\n[ INFO] %s : completed / parent pid = %x / child pid = %x / at cycle [%d]\n",215 fork_dmsg("\n[DMSG] %s : completed / parent pid = %x / child pid = %x / at cycle [%d]\n", 216 216 __FUNCTION__, parent_process->pid, child_process->pid, hal_get_cycles() ); 217 217 -
trunk/kernel/syscalls/sys_signal.c
r23 r406 45 45 this->process->sig_mgr.sigactions[sig_id] = handler; 46 46 47 signal_dmsg("\n[ INFO] %s : handler @%x has been registred for signal %d\n",47 signal_dmsg("\n[DMSG] %s : handler @%x has been registred for signal %d\n", 48 48 __FUNCTION__ , handler , sig_id ); 49 49 -
trunk/kernel/syscalls/sys_thread_create.c
r289 r406 161 161 tm_end = hal_get_cycles(); 162 162 163 thread_dmsg("\n[ INFO] %s created thread %x for process %x in cluster %x\n"163 thread_dmsg("\n[DMSG] %s created thread %x for process %x in cluster %x\n" 164 164 " start_cycle = %d / end_cycle = %d\n", 165 165 trdid , process->pid , k_attr.cxy , tm_start , tm_end ); -
trunk/kernel/syscalls/sys_thread_sleep.c
r296 r406 32 32 thread_t * this = CURRENT_THREAD; 33 33 34 thread_dmsg("\n[ INFO] %s : thread %x in process %x goes to sleep at cycle %d\n",34 thread_dmsg("\n[DMSG] %s : thread %x in process %x goes to sleep at cycle %d\n", 35 35 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() ); 36 36 … … 38 38 sched_yield( NULL ); 39 39 40 thread_dmsg("\n[ INFO] %s : thread %x in process %x resume at cycle\n",40 thread_dmsg("\n[DMSG] %s : thread %x in process %x resume at cycle\n", 41 41 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() ); 42 42 -
trunk/kernel/syscalls/sys_trace.c
r124 r406 49 49 // desactivate thread trace TODO 50 50 51 printk("\n[ INFO] %s : trace OFF for thread %x in process %x\n",51 printk("\n[DMSG] %s : trace OFF for thread %x in process %x\n", 52 52 __FUNCTION__ , trdid , pid ); 53 53 } … … 56 56 // activate thread trace TODO 57 57 58 printk("\n[ INFO] %s : trace ON for thread %x in process %x\n",58 printk("\n[DMSG] %s : trace ON for thread %x in process %x\n", 59 59 __FUNCTION__ , trdid , pid ); 60 60 } -
trunk/kernel/vfs/devfs.c
r279 r406 79 79 error_t error; 80 80 81 devfs_dmsg("\n[ INFO] %s : enter in cluster %x\n",81 devfs_dmsg("\n[DMSG] %s : enter in cluster %x\n", 82 82 __FUNCTION__ , local_cxy ); 83 83 … … 93 93 assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" ); 94 94 95 devfs_dmsg("\n[ INFO] %s : <dev> created in cluster %x\n",95 devfs_dmsg("\n[DMSG] %s : <dev> created in cluster %x\n", 96 96 __FUNCTION__ , local_cxy ); 97 97 … … 107 107 assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" ); 108 108 109 devfs_dmsg("\n[ INFO] %s : <external> created in cluster %x\n",109 devfs_dmsg("\n[DMSG] %s : <external> created in cluster %x\n", 110 110 __FUNCTION__ , local_cxy ); 111 111 } -
trunk/kernel/vfs/fatfs.c
r401 r406
249 249 error_t fatfs_get_cluster( mapper_t * mapper,
250 250 uint32_t first_cluster_id,
251 uint32_t page_index,
251 uint32_t searched_page_index,
252 252 uint32_t * searched_cluster_id )
253 253 {
254 254 page_t * current_page_desc; // pointer on current page descriptor
255 255 uint32_t * current_page_buffer; // pointer on current page (array of uint32_t)
256 uint32_t current_page_index; // index of current page in mapper
256 uint32_t current_page_index; // index of current page in FAT
257 257 uint32_t current_page_offset; // offset of slot in current page
258 258 uint32_t page_count_in_file; // index of page in file (index in linked list)
259 uint32_t current_cluster_id; // content of current FAT slot
260
261 assert( (page_index > 0) , __FUNCTION__ , "no FAT access required for first page\n");
262
263 fatfs_dmsg("\n[INFO] %s : enters / mapper = %x / first_cluster_id = %d / page_index = %d\n",
264 __FUNCTION__ , first_cluster_id , page_index );
265
266 #if (CONFIG_FATFS_DEBUG > 1)
267 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , mapper_get_page ( mapper , 0 ) ) );
268 uint32_t * buf = (uint32_t *)GET_PTR( base_xp );
269 uint32_t line , word;
270 printk("\n*** FAT mapper content for first 256 entries ***\n");
271 for( line = 0 ; line < 16 ; line++ )
272 {
273 printk("%X : ", line );
274 for( word = 0 ; word < 16 ; word++ ) printk("%X ", buf[(line<<4) + word] );
275 printk("\n");
276 }
277 #endif
278
279 // compute number of FAT slots per page
259 uint32_t next_cluster_id; // content of current FAT slot
260
261 assert( (searched_page_index > 0) , __FUNCTION__ ,
262 "no FAT access required for first page\n");
263
264 fatfs_dmsg("\n[DMSG] %s : enter / first_cluster_id = %d / searched_page_index = %d\n",
265 __FUNCTION__ , first_cluster_id , searched_page_index );
266
267 // get number of FAT slots per page
280 268 uint32_t slots_per_page = CONFIG_PPM_PAGE_SIZE >> 2;
281 269
…
284 272 current_page_offset = first_cluster_id % slots_per_page;
285 273 page_count_in_file = 0;
274 next_cluster_id = 0xFFFFFFFF;
286 275
287 276 // scan FAT (i.e. traverse FAT linked list)
288 while( page_count_in_file <= page_index )
289 {
290
291 fatfs_dmsg("\n[INFO] %s : page_index = %d / page_offset = %d / count = %d\n",
292 __FUNCTION__ , current_page_index , current_page_offset , page_count_in_file );
293
277 while( page_count_in_file < searched_page_index )
278 {
294 279 // get pointer on current page descriptor
295 280 current_page_desc = mapper_get_page( mapper , current_page_index );
…
302 287
303 288 // get FAT slot content
304 current_cluster_id = current_page_buffer[current_page_offset];
289 next_cluster_id = current_page_buffer[current_page_offset];
290
291 fatfs_dmsg("\n[DMSG] %s : traverse FAT / current_page_index = %d\n"
292 " current_page_offset = %d / next_cluster_id = %d\n",
293 __FUNCTION__ , current_page_index , current_page_offset , next_cluster_id );
305 294
306 295 // update loop variables
307 current_page_index = current_cluster_id / slots_per_page;
308 current_page_offset = current_cluster_id % slots_per_page;
296 current_page_index = next_cluster_id / slots_per_page;
297 current_page_offset = next_cluster_id % slots_per_page;
309 298 page_count_in_file++;
310 299 }
311
312 fatfs_dmsg("\n[INFO] %s : exit / cluster_id = %d\n",
313 __FUNCTION__ , current_cluster_id );
314
315 *searched_cluster_id = current_cluster_id;
300
301 if( next_cluster_id == 0xFFFFFFFF ) return EIO;
302
303 fatfs_dmsg("\n[DMSG] %s : exit / cluster_id = %d\n", __FUNCTION__ , next_cluster_id );
304
305 *searched_cluster_id = next_cluster_id;
316 306 return 0;
317 307
…
343 333 uint8_t * buffer;
344 334
345 fatfs_dmsg("\n[ INFO] %s : enter for fatfs_ctx = %x\n",
335 fatfs_dmsg("\n[DMSG] %s : enter for fatfs_ctx = %x\n",
346 336 __FUNCTION__ , fatfs_ctx );
347 337
…
357 347 "cannot allocate memory for 512 bytes buffer\n" );
358 348
359 fatfs_dmsg("\n[ INFO] %s : allocated 512 bytes buffer\n", __FUNCTION__ );
349 fatfs_dmsg("\n[DMSG] %s : allocated 512 bytes buffer\n", __FUNCTION__ );
360 350
361 351 // load the boot record from device
…
363 353 error = dev_ioc_sync_read( buffer , 0 , 1 );
364 354
365 fatfs_dmsg("\n[INFO] %s : buffer loaded\n", __FUNCTION__ );
366
367 assert( (error == 0) , __FUNCTION__ ,
368 "cannot access boot record\n" );
369
370 #if (CONFIG_FATFS_DEBUG > 1)
355 fatfs_dmsg("\n[DMSG] %s : buffer loaded\n", __FUNCTION__ );
356
357 assert( (error == 0) , __FUNCTION__ , "cannot access boot record\n" );
358
359 #if (CONFIG_FATFS_DEBUG & 0x1)
360 if( hal_time_stamp() > CONFIG_FATFS_DEBUG )
361 {
371 362 uint32_t line;
372 363 uint32_t byte = 0;
373 printk("\n***** FAT boot record\n");
364 printk("\n***** %s : FAT boot record\n", __FUNCTION__ );
374 365 for ( line = 0 ; line < 32 ; line++ )
375 366 {
…
383 374 byte += 16;
384 375 }
376 }
385 377 #endif
386 378
…
423 415 kmem_free( &req );
424 416
425 fatfs_dmsg("\n[ INFO] %s : boot record read & released\n",
417 fatfs_dmsg("\n[DMSG] %s : boot record read & released\n",
426 418 __FUNCTION__ );
427 419
…
445 437 fatfs_ctx->fat_mapper_xp = XPTR( local_cxy , fat_mapper );
446 438
447 fatfs_dmsg("\n[ INFO] %s : exit for fatfs_ctx = %x\n",
439 fatfs_dmsg("\n[DMSG] %s : exit for fatfs_ctx = %x\n",
448 440 __FUNCTION__ , fatfs_ctx );
449 441
…
472 464 fatfs_ctx_t * fatfs_ctx; // pointer on local FATFS context
473 465
474 // get pointer on source mapper and page index from page descriptor
466 // get pointer on mapper and page index from page descriptor
475 467 mapper = page->mapper;
476 468 index = page->index;
477 469
478 // get VFS inode pointer from mapper
470 // get inode pointer from mapper
479 471 inode = mapper->inode;
480 472
481 fatfs_dmsg("\n[ INFO] %s : core[%x,%d] enter for inode %x / page_id = %d / mapper = %x\n",
482 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , inode , index , mapper );
483
484 // get page to move base address
473 fatfs_dmsg("\n[DMSG] %s : core[%x,%d] enter for page %d / inode %x / mapper %x\n",
474 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , inode , mapper );
475
476 // get page base address
485 477 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
486 478 buffer = (uint8_t *)GET_PTR( base_xp );
…
496 488 lba = fatfs_ctx->fat_begin_lba + (count * index);
497 489
498 fatfs_dmsg("\n[ INFO] %s : core[%x,%d] access FAT on device / lba = %d\n",
490 fatfs_dmsg("\n[DMSG] %s : core[%x,%d] access FAT on device / lba = %d\n",
499 491 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , lba );
500 492
…
519 511 else // FAT mapper access required
520 512 {
513 fatfs_dmsg("\n[DMSG] %s : core[%x,%d] must access FAT\n",
514 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
515
521 516 // get cluster and local pointer on FAT mapper
522 517 xptr_t fat_mapper_xp = fatfs_ctx->fat_mapper_xp;
…
545 540 }
546 541
542 fatfs_dmsg("\n[DMSG] %s : core[%x,%d] access device for inode %x / cluster_id %d\n",
543 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , inode , searched_cluster_id );
544
547 545 // get lba from cluster_id
548 546 lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster_id );
549
550 fatfs_dmsg("\n[INFO] %s : core[%x,%d] access device for inode %x / cluster_id = %d\n",
551 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , inode , first_cluster_id );
552 547
553 548 // access device
…
558 553 }
559 554
560 fatfs_dmsg("\n[INFO] %s : core[%x,%d] exit for inode %x / page_id = %d / mapper = %x\n",
561 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , inode , index , mapper );
555 fatfs_dmsg("\n[DMSG] %s : core[%x,%d] exit for page %d / inode %x / mapper %x\n",
556 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , inode , mapper );
557
558 #if (CONFIG_FATFS_DEBUG & 0x1)
559 if( hal_time_stamp() > CONFIG_FATFS_DEBUG )
560 {
561 uint32_t * tab = (uint32_t *)buffer;
562 uint32_t line , word;
563 printk("\n***** %s : First 64 words of loaded page\n", __FUNCTION__ );
564 for( line = 0 ; line < 8 ; line++ )
565 {
566 printk("%X : ", line );
567 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] );
568 printk("\n");
569 }
570 }
571 #endif
562 572
563 573 return 0;
…
574 584 // - scan the directory entries in each 4 Kbytes page
575 585
576 fatfs_dmsg("\n[ INFO] %s : enter for child <%s> in parent inode %l\n",
586 fatfs_dmsg("\n[DMSG] %s : enter for child <%s> in parent inode %l\n",
577 587 __FUNCTION__ , name , XPTR( local_cxy , parent_inode ) );
578 588
…
612 622 base = (uint8_t *)GET_PTR( base_xp );
613 623
614 #if (CONFIG_FATFS_DEBUG > 1)
624 #if (CONFIG_FATFS_DEBUG & 0x1)
625 if( hal_time_stamp() > CONFIG_FATFS_DEBUG )
626 {
615 627 uint32_t * buf = (uint32_t *)base;
616 628 uint32_t line , word;
617 printk("\n***** first 16 dir entries for parent inode %x\n", parent_inode );
629 printk("\n***** %s : First 16 dentries for parent inode %x\n",
630 __FUNCTION__ , parent_inode );
618 631 for( line = 0 ; line < 16 ; line++ )
619 632 {
…
622 635 printk("\n");
623 636 }
637 }
624 638 #endif
625
626
627 639 // scan this page until end of directory, end of page, or name found
628 640 while( (offset < 4096) && (found == 0) )
…
693 705 if ( found == -1 ) // found end of directory => failure
694 706 {
695 fatfs_dmsg("\n[ INFO] %s : exit / child <%s> not found in parent inode %l\n",
707 fatfs_dmsg("\n[DMSG] %s : exit / child <%s> not found in parent inode %l\n",
696 708 __FUNCTION__ , name , XPTR( local_cxy , parent_inode ) );
697 709
…
711 723 hal_remote_sw( XPTR( child_cxy , &child_ptr->extend ) , cluster );
712 724
713 fatfs_dmsg("\n[ INFO] %s : exit / child <%s> found in parent inode %l\n",
725 fatfs_dmsg("\n[DMSG] %s : exit / child <%s> found in parent inode %l\n",
714 726 __FUNCTION__ , name , XPTR( local_cxy , parent_inode ) );
715 727
-
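The fatfs_get_cluster() change above boils down to walking the FAT as a linked list of 32-bit slots: a 4 Kbytes mapper page holds 1024 slots, a slot index is split into a page index (cluster / 1024) and an offset (cluster % 1024), and one link is followed per file page. The user-space sketch below illustrates that walk with the whole FAT flattened into a plain array; the names fake_fat, get_cluster and END_OF_CHAIN are illustrative only and do not exist in the ALMOS-MKH sources.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096
#define SLOTS_PER_PAGE (PAGE_SIZE / 4)   // one FAT slot is a 32-bit word
#define END_OF_CHAIN   0xFFFFFFFFu       // assumed end-of-chain marker

// Illustrative stand-in for the FAT mapper : the whole FAT is a flat array,
// so the kernel's (page_index , page_offset) pair collapses into one index.
static uint32_t fake_fat[4 * SLOTS_PER_PAGE];

// Follow one FAT link per file page, as the kernel loop does with
// page_index = slot / SLOTS_PER_PAGE and page_offset = slot % SLOTS_PER_PAGE.
static int get_cluster( uint32_t   first_cluster,
                        uint32_t   searched_page_index,
                        uint32_t * searched_cluster )
{
    uint32_t current = first_cluster;
    uint32_t count;

    if( searched_page_index == 0 ) return -1;    // no FAT access for page 0

    for( count = 0 ; count < searched_page_index ; count++ )
    {
        uint32_t next = fake_fat[current];       // read current FAT slot
        if( next == END_OF_CHAIN ) return -1;    // chain shorter than requested
        current = next;
    }
    *searched_cluster = current;
    return 0;
}

int main( void )
{
    // build a small chain : cluster 34 -> 35 -> 60 -> end
    fake_fat[34] = 35;
    fake_fat[35] = 60;
    fake_fat[60] = END_OF_CHAIN;

    uint32_t cluster;
    if( get_cluster( 34 , 2 , &cluster ) == 0 )
        printf( "page 2 of the file lies in cluster %u\n" , (unsigned)cluster );   // prints 60
    return 0;
}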
trunk/kernel/vfs/fatfs.h
r401 r406
202 202 ******************************************************************************************
203 203 * @ mapper : local pointer on the FAT mapper.
204 * @ first_cluster_id : index of the first FATFS cluster allocated to the file.
205 * @ searched_page : index of searched page in the file.
204 * @ first_cluster : index of the first FATFS cluster allocated to the file.
205 * @ page_index : index of searched page in file.
206 206 * @ searched_cluster_id : [out] found FATFS cluster index.
207 207 * @ return 0 if success / return EIO if a FAT mapper miss cannot be solved.
-
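The cluster index returned through the [out] argument above is ultimately turned into a device lba (see the call to fatfs_lba_from_cluster() in fatfs.c earlier in this changeset). The sketch below shows the usual FAT32 arithmetic for that conversion; the structure and field names (other than fat_begin_lba, which appears in the diff) are illustrative and may not match the ALMOS-MKH fatfs_ctx_t layout.

#include <stdint.h>
#include <stdio.h>

// Illustrative FAT32 geometry descriptor, relative to the partition start.
typedef struct fat32_geometry_s
{
    uint32_t fat_begin_lba;         // first lba of the (first) FAT copy
    uint32_t sectors_per_fat;       // size of one FAT in 512-byte sectors
    uint32_t nb_fats;               // number of FAT copies (usually 2)
    uint32_t sectors_per_cluster;   // cluster size in sectors (8 => 4 Kbytes)
}
fat32_geometry_t;

// Standard FAT32 conversion : data clusters are numbered from 2, and the
// data region starts right after the last FAT copy.
static uint32_t lba_from_cluster( fat32_geometry_t * geo,
                                  uint32_t           cluster )
{
    uint32_t data_begin_lba = geo->fat_begin_lba + (geo->nb_fats * geo->sectors_per_fat);
    return data_begin_lba + ((cluster - 2) * geo->sectors_per_cluster);
}

int main( void )
{
    fat32_geometry_t geo = { .fat_begin_lba       = 32,
                             .sectors_per_fat     = 1024,
                             .nb_fats             = 2,
                             .sectors_per_cluster = 8 };

    // cluster 60 -> lba 32 + 2*1024 + 58*8 = 2544
    printf( "cluster 60 starts at lba %u\n" , (unsigned)lba_from_cluster( &geo , 60 ) );
    return 0;
}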
trunk/kernel/vfs/vfs.c
r401 r406
1707 1707 assert( (mapper != NULL) , __FUNCTION__ , "no mapper for page\n" );
1708 1708
1709 vfs_dmsg("\n[ INFO] %s : enters for page %d in mapper / inode_xp %l\n",
1710 __FUNCTION__ , page->index , XPTR( local_cxy , &mapper->inode ));
1709 vfs_dmsg("\n[DMSG] %s : enters for page %d / inode_cxy = %x / inode_ptr = %x\n",
1710 __FUNCTION__ , page->index , local_cxy , mapper->inode );
1711 1711
1712 1712 // get FS type
…
1733 1733 }
1734 1734
1735 vfs_dmsg("\n[ INFO] %s : exit for page %d in mapper / inode %l\n",
1736 __FUNCTION__ , page->index , XPTR( local_cxy , &mapper->inode ));
1735 vfs_dmsg("\n[DMSG] %s : exit for page %d / inode_cxy = %x / inode_ptr = %x\n",
1736 __FUNCTION__ , page->index , local_cxy , mapper->inode );
1737 1737
1738 1738 return error;
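The vfs.c hunk above touches the generic entry point whose elided body (after the "// get FS type" line) dispatches the page transfer to the filesystem-specific handler. The self-contained sketch below shows that dispatch pattern in miniature; the enum values, handler names and error convention are examples chosen for the illustration, not copied from the ALMOS-MKH headers.

#include <stdio.h>

// Illustrative FS types and per-FS handlers for a mapper page move.
typedef enum { FS_FATFS, FS_RAMFS, FS_DEVFS } fs_type_t;

typedef int (*move_page_handler_t)( unsigned page_index );

static int fatfs_move_page( unsigned page_index )
{ printf( "FATFS handler : page %u\n" , page_index ); return 0; }

static int ramfs_move_page( unsigned page_index )
{ printf( "RAMFS handler : page %u\n" , page_index ); return 0; }

// generic entry point : select the handler from the FS type, then delegate
static int mapper_move_page( fs_type_t type , unsigned page_index )
{
    move_page_handler_t handler;

    switch( type )
    {
        case FS_FATFS : handler = fatfs_move_page; break;
        case FS_RAMFS : handler = ramfs_move_page; break;
        default       : return -1;                    // unsupported FS type
    }
    return handler( page_index );
}

int main( void )
{
    return mapper_move_page( FS_FATFS , 3 );   // delegates to the FATFS handler
}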