- Timestamp: Apr 4, 2018, 2:49:02 PM
- Location: trunk
- Files: 64 edited
trunk/hal/tsar_mips32/core/hal_exception.c
r437 → r438:
- Renamed CONFIG_DEBUG_HAL_EXCEPTIONS to DEBUG_HAL_EXCEPTIONS in the five
  debug-print blocks of the exception path: the MMU handler "enter" trace
  (lines 203-205), the page-fault-handled and copy-on-write-handled "exit"
  traces (lines 231-233 and 270-272), and the "enter"/"exit" traces of the
  generic handler around excPC/excCode (lines 392-394 and 452-454).
  Rename only, no functional change.
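All of the renamed blocks share one idiom: the DEBUG_* macro is simultaneously
a compile-time switch (a zero value drops the block at preprocessing) and a
runtime activation threshold (tracing starts only once the cycle counter
exceeds the macro value). A minimal standalone sketch of that idiom, with
stdio and a stub cycle counter standing in for the kernel's printk() and
hal_get_cycles():

    #include <stdio.h>
    #include <stdint.h>

    /* 0 removes the block at compile time; a non-zero value N delays
     * tracing until the cycle counter exceeds N.                      */
    #define DEBUG_HAL_EXCEPTIONS  1000

    /* stub standing in for the kernel's hal_get_cycles() */
    static uint64_t hal_get_cycles( void ) { static uint64_t c = 0; return c += 700; }

    int main( void )
    {
        int i;
        for( i = 0 ; i < 4 ; i++ )
        {
    #if DEBUG_HAL_EXCEPTIONS
            uint32_t cycle = (uint32_t)hal_get_cycles();
            if( DEBUG_HAL_EXCEPTIONS < cycle )       /* runtime threshold */
                printf("[DBG] %s : iteration %d / cycle %u\n",
                       __func__, i, (unsigned)cycle );
    #endif
        }
        return 0;
    }

Only iterations past the 1000-cycle threshold print, which is how the kernel
suppresses early-boot noise without recompiling.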
r437 r438 201 201 } 202 202 203 #if CONFIG_DEBUG_HAL_EXCEPTIONS203 #if DEBUG_HAL_EXCEPTIONS 204 204 uint32_t cycle = (uint32_t)hal_get_cycles(); 205 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )205 if( DEBUG_HAL_EXCEPTIONS < cycle ) 206 206 printk("\n[DBG] %s : thread %x enter / is_ins %d / %s / vaddr %x / cycle %d\n", 207 207 __FUNCTION__, this, is_ins, hal_mmu_exception_str(excp_code), bad_vaddr, cycle ); … … 229 229 { 230 230 231 #if CONFIG_DEBUG_HAL_EXCEPTIONS231 #if DEBUG_HAL_EXCEPTIONS 232 232 cycle = (uint32_t)hal_get_cycles(); 233 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )233 if( DEBUG_HAL_EXCEPTIONS < cycle ) 234 234 printk("\n[DBG] %s : thread %x exit / page-fault handled for vaddr = %x\n", 235 235 __FUNCTION__ , this , bad_vaddr ); … … 268 268 { 269 269 270 #if CONFIG_DEBUG_HAL_EXCEPTIONS270 #if DEBUG_HAL_EXCEPTIONS 271 271 cycle = (uint32_t)hal_get_cycles(); 272 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )272 if( DEBUG_HAL_EXCEPTIONS < cycle ) 273 273 printk("\n[DBG] %s : thread %x exit / copy-on-write handled for vaddr = %x\n", 274 274 __FUNCTION__ , this , bad_vaddr ); … … 390 390 excPC = uzone[UZ_EPC]; 391 391 392 #if CONFIG_DEBUG_HAL_EXCEPTIONS392 #if DEBUG_HAL_EXCEPTIONS 393 393 uint32_t cycle = (uint32_t)hal_get_cycles(); 394 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )394 if( DEBUG_HAL_EXCEPTIONS < cycle ) 395 395 printk("\n[DBG] %s : thread %x enter / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n", 396 396 __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle ); … … 450 450 } 451 451 452 #if CONFIG_DEBUG_HAL_EXCEPTIONS452 #if DEBUG_HAL_EXCEPTIONS 453 453 cycle = (uint32_t)hal_get_cycles(); 454 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )454 if( DEBUG_HAL_EXCEPTIONS < cycle ) 455 455 printk("\n[DBG] %s : thread %x exit / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n", 456 456 __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle ); -
trunk/hal/tsar_mips32/core/hal_gpt.c
r432 → r438:
- Renamed CONFIG_DEBUG_GPT_ACCESS to DEBUG_GPT_ACCESS in all debug-print
  blocks of the generic page table code: GPT creation (lines 134-136 and
  163-165), hal_gpt_set_pte() with its intermediate pt1/pte1/pt2 traces and
  the exit trace that prints the PTE2 attribute and PPN words pt2[2*ix2] and
  pt2[2*ix2+1] (lines 342-344, 359-360, 394-395, 439-440, 454-456), and the
  PTE-copy function (lines 764-766, 855-857, 872-874). Rename only.
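The set_pte exit trace prints pt2[2*ix2] and pt2[2*ix2+1], which implies each
second-level entry is an (attributes, ppn) pair of consecutive 32-bit words.
A sketch of accessors for that layout; the helper names and the write ordering
in pte2_set() are assumptions for illustration, not the driver's actual code:

    #include <stdint.h>

    typedef uint32_t ppn_t;

    /* Each PTE2 is an (attr, ppn) pair of consecutive 32-bit words in the
     * flat PT2 array, matching the pt2[2*ix2] / pt2[2*ix2+1] accesses.   */
    static inline uint32_t pte2_attr( uint32_t * pt2 , uint32_t ix2 )
    {
        return pt2[2 * ix2];              /* attributes word           */
    }

    static inline ppn_t pte2_ppn( uint32_t * pt2 , uint32_t ix2 )
    {
        return pt2[2 * ix2 + 1];          /* physical page number word */
    }

    static inline void pte2_set( uint32_t * pt2 , uint32_t ix2 ,
                                 uint32_t attr , ppn_t ppn )
    {
        pt2[2 * ix2 + 1] = ppn;           /* assumed ordering: PPN first...   */
        pt2[2 * ix2]     = attr;          /* ...attributes last, so the entry
                                             becomes valid only when complete */
    }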
r432 r438 132 132 xptr_t page_xp; 133 133 134 #if CONFIG_DEBUG_GPT_ACCESS134 #if DEBUG_GPT_ACCESS 135 135 uint32_t cycle = (uint32_t)hal_get_cycles; 136 if( CONFIG_DEBUG_GPT_ACCESS < cycle )136 if( DEBUG_GPT_ACCESS < cycle ) 137 137 printk("\n[DBG] %s : thread %x enter / cycle %d\n", 138 138 __FUNCTION__, CURRENT_THREAD, cycle ); … … 161 161 gpt->ppn = ppm_page2ppn( page_xp ); 162 162 163 #if CONFIG_DEBUG_GPT_ACCESS163 #if DEBUG_GPT_ACCESS 164 164 cycle = (uint32_t)hal_get_cycles; 165 if( CONFIG_DEBUG_GPT_ACCESS < cycle )165 if( DEBUG_GPT_ACCESS < cycle ) 166 166 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 167 167 __FUNCTION__, CURRENT_THREAD, cycle ); … … 340 340 uint32_t tsar_attr; // PTE attributes for TSAR MMU 341 341 342 #if CONFIG_DEBUG_GPT_ACCESS342 #if DEBUG_GPT_ACCESS 343 343 uint32_t cycle = (uint32_t)hal_get_cycles; 344 if( CONFIG_DEBUG_GPT_ACCESS < cycle )344 if( DEBUG_GPT_ACCESS < cycle ) 345 345 printk("\n[DBG] %s : thread %x enter / vpn %x / attr %x / ppn %x / cycle %d\n", 346 346 __FUNCTION__, CURRENT_THREAD, vpn, attr, ppn, cycle ); … … 357 357 tsar_attr = gpt2tsar( attr ); 358 358 359 #if ( CONFIG_DEBUG_GPT_ACCESS & 1)360 if( CONFIG_DEBUG_GPT_ACCESS < cycle )359 #if (DEBUG_GPT_ACCESS & 1) 360 if( DEBUG_GPT_ACCESS < cycle ) 361 361 printk("\n[DBG] %s : thread %x / vpn %x / &pt1 %x / tsar_attr %x\n", 362 362 __FUNCTION__, CURRENT_THREAD, vpn, pt1, tsar_attr ); … … 392 392 pte1 = *pte1_ptr; 393 393 394 #if ( CONFIG_DEBUG_GPT_ACCESS & 1)395 if( CONFIG_DEBUG_GPT_ACCESS < cycle )394 #if (DEBUG_GPT_ACCESS & 1) 395 if( DEBUG_GPT_ACCESS < cycle ) 396 396 printk("\n[DBG] %s : thread %x / vpn %x / current_pte1 %x\n", 397 397 __FUNCTION__, CURRENT_THREAD, vpn, pte1 ); … … 437 437 pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) ); 438 438 439 #if ( CONFIG_DEBUG_GPT_ACCESS & 1)440 if( CONFIG_DEBUG_GPT_ACCESS < cycle )439 #if (DEBUG_GPT_ACCESS & 1) 440 if( DEBUG_GPT_ACCESS < cycle ) 441 441 printk("\n[DBG] %s : thread %x / vpn %x / pte1 %x / &pt2 %x\n", 442 442 __FUNCTION__, CURRENT_THREAD, vpn, pte1, pt2 ); … … 452 452 hal_fence(); 453 453 454 #if CONFIG_DEBUG_GPT_ACCESS454 #if DEBUG_GPT_ACCESS 455 455 cycle = (uint32_t)hal_get_cycles; 456 if( CONFIG_DEBUG_GPT_ACCESS < cycle )456 if( DEBUG_GPT_ACCESS < cycle ) 457 457 printk("\n[DBG] %s : thread %x exit / vpn %x / pte2_attr %x / pte2_ppn %x / cycle %d\n", 458 458 __FUNCTION__, CURRENT_THREAD, vpn, pt2[2 * ix2], pt2[2 * ix2 + 1], cycle ); … … 762 762 ppn_t dst_pt2_ppn; 763 763 764 #if CONFIG_DEBUG_GPT_ACCESS764 #if DEBUG_GPT_ACCESS 765 765 uint32_t cycle = (uint32_t)hal_get_cycles; 766 if( CONFIG_DEBUG_GPT_ACCESS < cycle )766 if( DEBUG_GPT_ACCESS < cycle ) 767 767 printk("\n[DBG] %s : thread %x enter / vpn %x / cycle %d\n", 768 768 __FUNCTION__, CURRENT_THREAD, vpn, cycle ); … … 853 853 *ppn = src_pte2_ppn; 854 854 855 #if CONFIG_DEBUG_GPT_ACCESS855 #if DEBUG_GPT_ACCESS 856 856 cycle = (uint32_t)hal_get_cycles; 857 if( CONFIG_DEBUG_GPT_ACCESS < cycle )857 if( DEBUG_GPT_ACCESS < cycle ) 858 858 printk("\n[DBG] %s : thread %x exit / copy done for vpn %x / cycle %d\n", 859 859 __FUNCTION__, CURRENT_THREAD, vpn, cycle ); … … 870 870 *ppn = 0; 871 871 872 #if CONFIG_DEBUG_GPT_ACCESS872 #if DEBUG_GPT_ACCESS 873 873 cycle = (uint32_t)hal_get_cycles; 874 if( CONFIG_DEBUG_GPT_ACCESS < cycle )874 if( DEBUG_GPT_ACCESS < cycle ) 875 875 printk("\n[DBG] %s : thread %x exit / nothing done for vpn %x / cycle %d\n", 876 876 __FUNCTION__, CURRENT_THREAD, vpn, cycle ); -
trunk/hal/tsar_mips32/core/hal_kentry.S
r432 → r438:
- Renamed CONFIG_DEBUG_HAL_KENTRY to DEBUG_HAL_KENTRY in the two
  conditionally assembled trace sections: the "enter" message after the new
  c0_sr is set (lines 202-203) and the "exit" message after the previous
  uzone pointer is restored (lines 351-352). The '#----' separator comments
  above them were shortened to match the shorter macro name.
r432 r438 200 200 mtc0 $3, $12 # set new c0_sr 201 201 202 #-------------------- ------203 #if CONFIG_DEBUG_HAL_KENTRY202 #-------------------- 203 #if DEBUG_HAL_KENTRY 204 204 205 205 # display "enter" message … … 349 349 sw $5, 8($4) # current uzone pointer <= previous 350 350 351 #------------------- -------352 #if CONFIG_DEBUG_HAL_KENTRY351 #------------------- 352 #if DEBUG_HAL_KENTRY 353 353 354 354 # display "exit" message -
trunk/hal/tsar_mips32/drivers/soclib_bdv.c
r437 → r438:
- Renamed CONFIG_DEBUG_HAL_IOC_RX and CONFIG_DEBUG_HAL_IOC_TX to
  DEBUG_HAL_IOC_RX and DEBUG_HAL_IOC_TX in the RX/TX "enter" traces of the
  command function (lines 77-88), its RX/TX "exit" traces (lines 154-165),
  and the IRQ-handler traces for completed RX and TX transfers
  (lines 201-205 and 213-217). Rename only.
r437 r438 75 75 ioc_xp = (xptr_t)hal_remote_lwd( XPTR( th_cxy , &th_ptr->ioc_cmd.dev_xp ) ); 76 76 77 #if CONFIG_DEBUG_HAL_IOC_RX78 uint32_t cycle = (uint32_t)hal_get_cycles(); 79 if( ( CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != IOC_WRITE ) )77 #if DEBUG_HAL_IOC_RX 78 uint32_t cycle = (uint32_t)hal_get_cycles(); 79 if( (DEBUG_HAL_IOC_RX < cycle) && (cmd_type != IOC_WRITE ) ) 80 80 printk("\n[DBG] %s : thread %x enter for RX / cycle %d\n", 81 81 __FUNCTION__ , CURRENT_THREAD , cycle ); 82 82 #endif 83 83 84 #if CONFIG_DEBUG_HAL_IOC_TX85 uint32_t cycle = (uint32_t)hal_get_cycles(); 86 if( ( CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == IOC_WRITE) )84 #if DEBUG_HAL_IOC_TX 85 uint32_t cycle = (uint32_t)hal_get_cycles(); 86 if( (DEBUG_HAL_IOC_TX < cycle) && (cmd_type == IOC_WRITE) ) 87 87 printk("\n[DBG] %s : thread %x enter for TX / cycle %d\n", 88 88 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 152 152 } 153 153 154 #if CONFIG_DEBUG_HAL_IOC_RX154 #if DEBUG_HAL_IOC_RX 155 155 cycle = (uint32_t)hal_get_cycles(); 156 if( ( CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != TXT_WRITE) )156 if( (DEBUG_HAL_IOC_RX < cycle) && (cmd_type != TXT_WRITE) ) 157 157 printk("\n[DBG] %s : thread %x exit after RX / cycle %d\n", 158 158 __FUNCTION__ , CURRENT_THREAD , cycle ); 159 159 #endif 160 160 161 #if CONFIG_DEBUG_HAL_IOC_TX161 #if DEBUG_HAL_IOC_TX 162 162 cycle = (uint32_t)hal_get_cycles(); 163 if( ( CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == TXT_WRITE) )163 if( (DEBUG_HAL_IOC_TX < cycle) && (cmd_type == TXT_WRITE) ) 164 164 printk("\n[DBG] %s : thread %x exit after TX / cycle %d\n", 165 165 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 199 199 error = (status != BDV_READ_SUCCESS); 200 200 201 #if CONFIG_DEBUG_HAL_IOC_RX202 uint32_t cycle = (uint32_t)hal_get_cycles(); 203 if( CONFIG_DEBUG_HAL_IOC_RX < cycle )201 #if DEBUG_HAL_IOC_RX 202 uint32_t cycle = (uint32_t)hal_get_cycles(); 203 if( DEBUG_HAL_IOC_RX < cycle ) 204 204 printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n", 205 205 __FUNCTION__, client_ptr , chdev->server , cycle ); … … 211 211 error = (status != BDV_WRITE_SUCCESS); 212 212 213 #if CONFIG_DEBUG_HAL_IOC_TX214 uint32_t cycle = (uint32_t)hal_get_cycles(); 215 if( CONFIG_DEBUG_HAL_IOC_TX < cycle )213 #if DEBUG_HAL_IOC_TX 214 uint32_t cycle = (uint32_t)hal_get_cycles(); 215 if( DEBUG_HAL_IOC_TX < cycle ) 216 216 printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n", 217 217 __FUNCTION__, client_ptr , chdev->server , cycle ); -
trunk/hal/tsar_mips32/drivers/soclib_pic.c
r435 → r438:
- Renamed CONFIG_DEBUG_HAL_IRQS to DEBUG_HAL_IRQS in all trace blocks of the
  IRQ handler (WTI/HWI/PTI status, IPI, external WTI, HWI and PTI messages)
  and of the bind function (WTI and HWI binding traces).
- Added banner comment lines above the three dispatch branches and reworded
  the branch comments: "it is an external device" becomes "it is an external
  IRQ", "pending HWI" becomes "It is an Internal IRQ", "pending PTI" becomes
  "It is a Timer IRQ", and the "acknowledge WTI"/"acknowledge PTI" comments
  both become "acknowledge IRQ".
- In the external-WTI branch with no registered chdev, the mask write now
  reuses the existing xcu_base pointer and is followed by a fence:

      // before (r435)
      uint32_t * base = soclib_pic_xcu_base();
      base[(XCU_MSK_WTI_DISABLE << 5) | core->lid] = 1 << core->lid;

      // after (r438)
      xcu_base[(XCU_MSK_WTI_DISABLE << 5) | core->lid] = 1 << core->lid;
      hal_fence();

- A hal_fence() is likewise added after the HWI mask-disable write.
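The WTI branch distinguishes IPIs (index below the local core count) from
external device interrupts, and r438 adds a hal_fence() after the mask write.
A standalone sketch of that dispatch, with a fake register file and
illustrative XCU register indices replacing the driver's xcu_base and soclib
helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define XCU_WTI_REG          0    /* illustrative register indices */
    #define XCU_MSK_WTI_DISABLE  1

    static uint32_t xcu_base[2 << 5];   /* fake XCU register file        */
    static uint32_t cores_nr = 4;       /* cores in the local cluster    */

    static void rpc_check( void ) { printf("IPI: check RPC FIFO\n"); }

    static void wti_dispatch( uint32_t wti_status , uint32_t lid )
    {
        if( wti_status == 0 ) return;   /* status 0 : no pending WTI     */
        uint32_t index = wti_status - 1;

        if( index < cores_nr )          /* WTI in core range : an IPI    */
        {
            /* acknowledge IRQ (in the driver, an XCU read side effect)  */
            uint32_t ack = xcu_base[(XCU_WTI_REG << 5) | lid];
            if( ack + 1 ) rpc_check();  /* always true; forces use of ack */
        }
        else                            /* external IRQ with no chdev:
                                           mask it (the driver first does
                                           a chdev lookup, elided here)   */
        {
            xcu_base[(XCU_MSK_WTI_DISABLE << 5) | lid] = 1 << lid;
            /* r438 adds hal_fence() here so the mask write is visible   */
        }
    }

    int main( void ) { wti_dispatch( 2 , 1 ); return 0; }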
trunk/hal/tsar_mips32/drivers/soclib_tty.c
r436 → r438:
- Renamed CONFIG_DEBUG_SYS_READ, CONFIG_DEBUG_SYS_WRITE,
  CONFIG_DEBUG_HAL_TXT_RX and CONFIG_DEBUG_HAL_TXT_TX to their DEBUG_* forms
  throughout: the extern enter/exit timestamp declarations (lines 32-40),
  the command function's enter/exit instrumentation for TXT_READ and
  TXT_WRITE (lines 117-135 and 240-259), and the ISR's enter/exit
  instrumentation for the RX and TX directions (lines 290-307 and 461-478).
  Rename only.
trunk/kernel/devices/dev_fbf.c
r437 → r438:
- Renamed CONFIG_DEBUG_DEV_FBF_RX and CONFIG_DEBUG_DEV_FBF_TX to
  DEBUG_DEV_FBF_RX and DEBUG_DEV_FBF_TX in the enter/exit traces of the
  frame-buffer read wrapper (lines 179-191) and write wrapper
  (lines 203-215), both of which forward to dev_fbf_access(). Rename only.
trunk/kernel/devices/dev_ioc.c
r437 → r438:
- Renamed CONFIG_DEBUG_DEV_IOC_RX and CONFIG_DEBUG_DEV_IOC_TX to
  DEBUG_DEV_IOC_RX and DEBUG_DEV_IOC_TX in the enter/exit traces of the
  blocking read wrapper (lines 138-150), the blocking write wrapper
  (lines 162-174), and the synchronous read function that re-enables the
  IRQ through dev_pic_enable_irq() (lines 188-192 and 229-233). Rename only.
trunk/kernel/devices/dev_mmc.c
r437 → r438:
- Renamed CONFIG_DEBUG_DEV_MMC to DEBUG_DEV_MMC in the enter/exit traces of
  the two MMC access functions that call dev_mmc_access() (lines 101-103,
  130-132, 149-151 and 178-180). Rename only.
trunk/kernel/devices/dev_nic.c
r437 → r438:
- Renamed CONFIG_DEBUG_DEV_NIC_RX to DEBUG_DEV_NIC_RX in the enter/exit
  traces of the two packet-transfer functions (lines 101-103, 155-157,
  179-181 and 231-233); both functions are guarded by the RX macro in this
  revision. Rename only.
trunk/kernel/devices/dev_pic.c
r437 → r438:
- Renamed CONFIG_DEBUG_DEV_PIC to DEBUG_DEV_PIC in the trace blocks of the
  six PIC entry points (enable/disable IRQ, enable/disable timer, send IPI,
  ack IPI; lines 88-90, 111-113, 133-135, 155-157, 178-180 and 200-202).
  Rename only.
trunk/kernel/devices/dev_txt.c
r436 → r438:
- Renamed CONFIG_DEBUG_SYS_READ and CONFIG_DEBUG_SYS_WRITE to DEBUG_SYS_READ
  and DEBUG_SYS_WRITE in the extern timestamp declarations (lines 40-45) and
  in the enter/exit stamps taken with hal_time_stamp() around dev_txt_write()
  and dev_txt_read() (lines 163, 181, 192 and 210).
- Renamed CONFIG_DEBUG_DEV_TXT_TX and CONFIG_DEBUG_DEV_TXT_RX to
  DEBUG_DEV_TXT_TX and DEBUG_DEV_TXT_RX in the enter/exit traces of the same
  two functions (lines 167-178 and 196-207). Rename only.
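dev_txt.c brackets each operation between a pair of global cycle stamps
(enter_txt_write / exit_txt_write) that a reporting path can later subtract.
A standalone sketch of the same bracketing, with hal_time_stamp() stubbed by
a counter and the bracketed access reduced to an empty function:

    #include <stdio.h>
    #include <stdint.h>

    /* global stamp pair, as in the DEBUG_SYS_WRITE instrumentation */
    static uint32_t enter_txt_write;
    static uint32_t exit_txt_write;

    /* stub standing in for the kernel's hal_time_stamp() */
    static uint32_t hal_time_stamp( void ) { static uint32_t t = 0; return t += 350; }

    static void dev_txt_access_stub( void ) { /* the bracketed operation */ }

    int main( void )
    {
        enter_txt_write = hal_time_stamp();   /* stamp before the access   */
        dev_txt_access_stub();
        exit_txt_write  = hal_time_stamp();   /* stamp after completion    */
        printf("TXT write cost : %u cycles\n",
               (unsigned)(exit_txt_write - enter_txt_write) );
        return 0;
    }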
trunk/kernel/fs/devfs.c
r437 → r438:
- Renamed CONFIG_DEBUG_SYS_READ, CONFIG_DEBUG_SYS_WRITE,
  CONFIG_DEBUG_DEVFS_INIT and CONFIG_DEBUG_DEVFS_MOVE to their DEBUG_* forms
  throughout: the extern timestamp declarations (lines 44-49), the two devfs
  init steps that create the <dev> and <external> inodes (lines 94-96,
  112-113, 128-131, 149-151 and 359-361), and the devfs move function with
  its to_buffer enter/exit traces and read/write exit stamps (lines 387-397,
  433-441 and 457-465). Rename only.
trunk/kernel/fs/fatfs.c
r435 → r438:
- Renamed CONFIG_DEBUG_FATFS_GET_CLUSTER, CONFIG_DEBUG_FATFS_INIT,
  CONFIG_DEBUG_FATFS_MOVE and CONFIG_DEBUG_FATFS_LOAD to their DEBUG_* forms
  in the trace blocks of fatfs_get_cluster() (the FAT chain walk that follows
  next_cluster_id links and returns EIO on 0xFFFFFFFF), the context init
  function (boot record access and fat_mapper setup), the page move function
  (FAT access on device, local and remote FAT mapper access), and the
  directory-entry load function (child found / not found paths). Rename only.
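fatfs_get_cluster() reaches the cluster that backs a given page index by
following FAT links from the file's first cluster, failing when the chain
ends early. A standalone sketch of that walk over an in-memory FAT; the
kernel version reads the links through the FAT mapper one 4 KB page at a
time, and the end-of-chain constant mirrors the 0xFFFFFFFF test in the diff:

    #include <stdio.h>
    #include <stdint.h>

    #define FAT_CHAIN_END 0xFFFFFFFFu   /* "no next cluster" in the diff */

    /* Follow FAT links until the cluster backing searched_index is
     * reached; fail if the chain ends early (EIO in the kernel).        */
    static int fat_get_cluster( const uint32_t * fat ,
                                uint32_t   first_cluster_id ,
                                uint32_t   searched_index ,
                                uint32_t * found_id )
    {
        uint32_t current = first_cluster_id;
        uint32_t i;
        for( i = 0 ; i < searched_index ; i++ )
        {
            uint32_t next = fat[current];
            if( next == FAT_CHAIN_END ) return -1;
            current = next;
        }
        *found_id = current;
        return 0;
    }

    int main( void )
    {
        /* toy FAT: file starts at cluster 2, chain is 2 -> 5 -> 7 -> end */
        uint32_t fat[8] = { 0, 0, 5, 0, 0, 7, 0, FAT_CHAIN_END };
        uint32_t id;
        if( fat_get_cluster( fat , 2 , 2 , &id ) == 0 )
            printf("cluster for page index 2 : %u\n", (unsigned)id );  /* 7 */
        return 0;
    }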
trunk/kernel/fs/vfs.c
r437 → r438:
- Renamed the VFS debug macros to their DEBUG_* forms throughout:
  CONFIG_DEBUG_VFS_INODE_CREATE, _VFS_INODE_LOAD, _VFS_DENTRY_CREATE,
  _SYSCALLS_ERROR, _VFS_OPEN, _VFS_LOOKUP, _VFS_ADD_CHILD, _VFS_MAPPER_MOVE
  and _VFS_MAPPER_LOAD become DEBUG_VFS_INODE_CREATE, DEBUG_VFS_INODE_LOAD,
  DEBUG_VFS_DENTRY_CREATE, DEBUG_SYSCALLS_ERROR, DEBUG_VFS_OPEN,
  DEBUG_VFS_LOOKUP, DEBUG_VFS_ADD_CHILD, DEBUG_VFS_MAPPER_MOVE and
  DEBUG_VFS_MAPPER_LOAD. Affected areas: inode create and load, dentry
  create (including its two error blocks for an over-long name and a failed
  allocation), open, the lookup loop that calls vfs_get_name_from_path() on
  each component and loads missing children, the add-child function that
  creates the dentry and inode in the proper clusters, and the two mapper
  functions. Rename only.
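vfs_lookup() consumes the pathname one component per iteration through
vfs_get_name_from_path(), with a `last` flag marking the final component. A
standalone sketch of just that parsing step (inode lookup and miss handling
elided); the splitting logic shown is an illustration, not the kernel's
implementation:

    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        char   path[] = "dev/external/txt0";
        char * cur    = path;

        while( cur != NULL && *cur != '\0' )
        {
            char * next = strchr( cur , '/' );   /* end of this component  */
            int    last = (next == NULL);        /* final component ?      */
            if( !last ) *next = '\0';
            printf("component <%s> / last = %d\n", cur , last );
            cur = last ? NULL : next + 1;        /* advance past the '/'   */
        }
        return 0;
    }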
trunk/kernel/kern/chdev.c
r437 → r438:
- Renamed CONFIG_DEBUG_SYS_READ, CONFIG_DEBUG_SYS_WRITE,
  CONFIG_DEBUG_CHDEV_CMD_RX, CONFIG_DEBUG_CHDEV_CMD_TX,
  CONFIG_DEBUG_CHDEV_SERVER_RX and CONFIG_DEBUG_CHDEV_SERVER_TX to their
  DEBUG_* forms: the extern timestamp declarations, the client-side command
  function's RX/TX enter/exit traces (keyed on the chdev's is_rx field), and
  the server-side start/complete traces around the command execution and the
  thread_unblock( client_xp , THREAD_BLOCKED_IO ) that wakes the client.
  Rename only.
trunk/kernel/kern/cluster.c
r437 r438 89 89 spinlock_init( &cluster->kcm_lock ); 90 90 91 #if CONFIG_DEBUG_CLUSTER_INIT91 #if DEBUG_CLUSTER_INIT 92 92 uint32_t cycle = (uint32_t)hal_get_cycles(); 93 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )93 if( DEBUG_CLUSTER_INIT < cycle ) 94 94 printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n", 95 95 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle ); … … 99 99 cluster->dqdt_root_level = dqdt_init( info->x_size, 100 100 info->y_size, 101 info->y_width ); 102 cluster->threads_var = 0; 103 cluster->pages_var = 0; 101 info->y_width ) - 1; 104 102 105 103 // initialises embedded PPM … … 113 111 } 114 112 115 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )113 #if( DEBUG_CLUSTER_INIT & 1 ) 116 114 cycle = (uint32_t)hal_get_cycles(); 117 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )115 if( DEBUG_CLUSTER_INIT < cycle ) 118 116 printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n", 119 117 __FUNCTION__ , local_cxy , cycle ); … … 123 121 khm_init( &cluster->khm ); 124 122 125 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )123 #if( DEBUG_CLUSTER_INIT & 1 ) 126 124 uint32_t cycle = (uint32_t)hal_get_cycles(); 127 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )125 if( DEBUG_CLUSTER_INIT < cycle ) 128 126 printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n", 129 127 __FUNCTION__ , local_cxy , hal_get_cycles() ); … … 133 131 kcm_init( &cluster->kcm , KMEM_KCM ); 134 132 135 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )133 #if( DEBUG_CLUSTER_INIT & 1 ) 136 134 uint32_t cycle = (uint32_t)hal_get_cycles(); 137 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )135 if( DEBUG_CLUSTER_INIT < cycle ) 138 136 printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n", 139 137 __FUNCTION__ , local_cxy , hal_get_cycles() ); … … 148 146 } 149 147 150 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )148 #if( DEBUG_CLUSTER_INIT & 1 ) 151 149 cycle = (uint32_t)hal_get_cycles(); 152 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )150 if( DEBUG_CLUSTER_INIT < cycle ) 153 151 printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n", 154 152 __FUNCTION__ , local_cxy , cycle ); … … 159 157 cluster->rpc_threads = 0; 160 158 161 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )159 #if( DEBUG_CLUSTER_INIT & 1 ) 162 160 cycle = (uint32_t)hal_get_cycles(); 163 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )161 if( DEBUG_CLUSTER_INIT < cycle ) 164 162 printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n", 165 163 __FUNCTION__ , local_cxy , hal_get_cycles() ); … … 188 186 } 189 187 190 #if CONFIG_DEBUG_CLUSTER_INIT188 #if DEBUG_CLUSTER_INIT 191 189 cycle = (uint32_t)hal_get_cycles(); 192 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )190 if( DEBUG_CLUSTER_INIT < cycle ) 193 191 printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n", 194 192 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle ); … … 456 454 pmgr_t * pm = &LOCAL_CLUSTER->pmgr; 457 455 458 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES456 #if DEBUG_CLUSTER_PROCESS_COPIES 459 457 uint32_t cycle = (uint32_t)hal_get_cycles(); 460 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )458 if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) 461 459 printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n", 462 460 __FUNCTION__ , local_cxy , process , cycle ); … … 487 485 remote_spinlock_unlock_busy( copies_lock , irq_state ); 488 486 489 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES487 #if DEBUG_CLUSTER_PROCESS_COPIES 490 488 cycle = (uint32_t)hal_get_cycles(); 491 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )489 if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) 492 490 printk("\n[DBG] %s exit / cluster %x / 
process %x / cycle %d\n", 493 491 __FUNCTION__ , local_cxy , process , cycle ); … … 502 500 pmgr_t * pm = &LOCAL_CLUSTER->pmgr; 503 501 504 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES502 #if DEBUG_CLUSTER_PROCESS_COPIES 505 503 uint32_t cycle = (uint32_t)hal_get_cycles(); 506 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )504 if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) 507 505 printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n", 508 506 __FUNCTION__ , local_cxy , process , cycle ); … … 530 528 remote_spinlock_unlock_busy( copies_lock , irq_state ); 531 529 532 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES530 #if DEBUG_CLUSTER_PROCESS_COPIES 533 531 cycle = (uint32_t)hal_get_cycles(); 534 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )532 if( DEBUG_CLUSTER_PROCESS_COPIES < cycle ) 535 533 printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n", 536 534 __FUNCTION__ , local_cxy , process , cycle ); -
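The trace guards renamed throughout this changeset (CONFIG_DEBUG_* to DEBUG_*) all follow one pattern: the macro value is a cycle threshold, a zero value compiles the trace out, and bit 0 enables the extra-verbose blocks tested with `& 1`. A minimal user-space sketch of that pattern; `get_cycles()` is a hypothetical stand-in for `hal_get_cycles()`, and the threshold value is arbitrary:

```c
#include <stdint.h>
#include <stdio.h>

// Per-feature switch: 0 compiles the trace out entirely; any other value is
// the cycle threshold from which traces become visible. Bit 0 additionally
// gates the extra-verbose variants tested with "#if( DEBUG_X & 1 )".
#define DEBUG_CLUSTER_INIT  10001

// Stand-in for hal_get_cycles() (hypothetical, for this sketch only).
static uint32_t get_cycles( void )
{
    static uint32_t c = 0;
    return c += 6000;
}

static void cluster_init_sketch( void )
{
#if DEBUG_CLUSTER_INIT
    uint32_t cycle = get_cycles();
    if( DEBUG_CLUSTER_INIT < cycle )
        printf("\n[DBG] %s : enter / cycle %u\n", __func__, cycle );
#endif

    /* ... PPM / KHM / KCM / cores / RPC FIFO initialisation ... */

#if( DEBUG_CLUSTER_INIT & 1 )   // verbose trace, enabled by an odd threshold
    cycle = get_cycles();
    if( DEBUG_CLUSTER_INIT < cycle )
        printf("\n[DBG] %s : PPM initialized / cycle %u\n", __func__, cycle );
#endif
}

int main( void ) { cluster_init_sketch(); return 0; }
```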
trunk/kernel/kern/cluster.h
r437 r438	
	// DQDT
-	int32_t     pages_var;    /*! pages number increment from last DQQT updt */
-	int32_t     threads_var;  /*! threads number increment from last DQDT updt */
	dqdt_node_t dqdt_tbl[CONFIG_DQDT_LEVELS_NR];  /*! embedded DQDT nodes in cluster */
-
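The two fields removed here were the buffered deltas of the old deferred DQDT accounting; r438 makes every update eager, so there is nothing left to buffer. A simplified contrast of the two schemes (single level, no atomics; all names are illustrative except threads_var):

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t threads; } dqdt_level0_t;

// r437 scheme (deferred): call sites only accumulated a per-cluster delta;
// a periodic job pushed it into the quad-tree and cleared it.
static int32_t threads_var;                       // the field removed in r438

static void old_update_threads( int32_t inc ) { threads_var += inc; }

static void old_periodic_flush( dqdt_level0_t * n )
{
    n->threads += threads_var;                    // propagate buffered delta
    threads_var = 0;
}

// r438 scheme (eager): each increment is applied immediately, then
// propagated toward the root, so no buffer field is needed at all.
static void new_update_threads( dqdt_level0_t * n, int32_t inc )
{
    n->threads += inc;                            // + recursive propagation
}

int main( void )
{
    dqdt_level0_t n = { 0 };
    old_update_threads( +2 ); old_periodic_flush( &n );
    new_update_threads( &n, -1 );
    printf( "threads = %d\n", n.threads );        // prints 1
    return 0;
}
```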
trunk/kernel/kern/core.c
r433 r438	
	// handle scheduler
	if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
-
-	// update DQDT
-	if( ((ticks % CONFIG_DQDT_TICKS_PER_QUANTUM) == 0) && (core->lid == 0) )
-		dqdt_global_update();
	}
-
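With eager propagation the tick handler loses its second periodic job. A sketch of the remaining shape, with a stubbed `sched_yield()` and an arbitrary quantum; the removed branch ran `dqdt_global_update()` once per DQDT quantum on core lid 0 only, so that a single core per cluster flushed the deltas:

```c
#include <stdint.h>
#include <stdio.h>

#define SCHED_TICKS_PER_QUANTUM  10   // stand-in for the CONFIG_ constant

static void sched_yield_stub( const char * cause )   // models sched_yield()
{
    printf( "yield: %s\n", cause );
}

// r438 shape of the tick handler: only the scheduling quantum remains.
static void core_clock_sketch( uint32_t ticks )
{
    if( (ticks % SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield_stub( "TICK" );
}

int main( void )
{
    for( uint32_t t = 1 ; t <= 30 ; t++ ) core_clock_sketch( t );  // 3 yields
    return 0;
}
```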
trunk/kernel/kern/do_syscall.c
r437 r438	
	int error = 0;
+	assert( (this == CURRENT_THREAD), __FUNCTION__,
+	"wrong <this> argument\n" );
	// update user time
	thread_user_time_update( this );
…
	// check kernel stack overflow
-	assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
+	assert( (CURRENT_THREAD->signature == THREAD_SIGNATURE), __FUNCTION__,
+	"kernel stack overflow for thread %x in cluster %x\n", CURRENT_THREAD, local_cxy );
	// update kernel time
-
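The strengthened assert presumably relies on the usual layout where the thread descriptor and its kernel stack share one block, the stack growing down toward the descriptor, so an overflow corrupts the signature word before anything else. A user-space sketch of that canary check; the magic value and struct layout are illustrative only:

```c
#include <assert.h>
#include <stdint.h>

#define THREAD_SIGNATURE 0x0DECAFE0u     // hypothetical magic value

// The descriptor and the kernel stack share one block; the stack grows
// down toward the signature word, so an overflow tramples it first.
typedef struct thread_s
{
    uint32_t signature;                  // canary at the low end of the block
    uint8_t  kstack[4096];               // stack area (grows toward signature)
} thread_t;

static void check_kernel_stack( thread_t * this_thread )
{
    // same test as the strengthened assert in do_syscall()
    assert( this_thread->signature == THREAD_SIGNATURE
            && "kernel stack overflow" );
}

int main( void )
{
    thread_t t = { .signature = THREAD_SIGNATURE };
    check_kernel_stack( &t );            // passes while the canary is intact
    return 0;
}
```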
trunk/kernel/kern/dqdt.c
r437 r438 28 28 #include <hal_remote.h> 29 29 #include <printk.h> 30 #include <chdev.h> 30 31 #include <cluster.h> 31 32 #include <bits.h> … … 33 34 34 35 35 /////////////////////////////////////////// 36 void dqdt_local_print( dqdt_node_t * node ) 37 { 38 printk("DQDT node : level = %d / cluster = %x / threads = %x / pages = %x\n", 39 node->level, 40 local_cxy, 41 node->threads, 42 node->pages ); 43 } 44 45 ///////////////////////////////////////// 46 void dqdt_global_print( xptr_t node_xp ) 36 /////////////////////////////////////////////////////////////////////////////////////////// 37 // Extern variables 38 /////////////////////////////////////////////////////////////////////////////////////////// 39 40 extern chdev_directory_t chdev_dir; // defined in chdev.h / allocated in kernel_init.c 41 42 43 /////////////////////////////////////////////////////////////////////////////////////////// 44 // This static recursive function traverse the DQDT quad-tree from root to bottom. 45 /////////////////////////////////////////////////////////////////////////////////////////// 46 static void dqdt_recursive_print( xptr_t node_xp ) 47 47 { 48 48 uint32_t i; 49 dqdt_node_t local_node; 50 51 // get root node local copy 52 hal_remote_memcpy( XPTR( local_cxy , &local_node ), node_xp , sizeof(dqdt_node_t) ); 53 54 // display DQDT node content 55 dqdt_local_print( &local_node ); 49 dqdt_node_t node; 50 51 // get node local copy 52 hal_remote_memcpy( XPTR( local_cxy , &node ), node_xp , sizeof(dqdt_node_t) ); 53 54 // display node content 55 nolock_printk("- level %d in cluster %x (node %x) : threads = %x / pages = %x\n", 56 node.level, GET_CXY( node_xp ), GET_PTR( node_xp ), node.threads, node.pages ); 56 57 57 58 // recursive call on children if node is not terminal 58 if ( local_node.level > 0 )59 if ( node.level > 0 ) 59 60 { 60 61 for ( i = 0 ; i < 4 ; i++ ) 61 62 { 62 if ( local_node.children[i] != XPTR_NULL ) 63 dqdt_global_print( local_node.children[i] ); 63 if ( node.children[i] != XPTR_NULL ) dqdt_recursive_print( node.children[i] ); 64 64 } 65 65 } 66 } 67 68 /////////////////// 69 void dqdt_display() 70 { 71 reg_t save_sr; 72 73 // build extended pointer on DQDT root node 74 cluster_t * cluster = LOCAL_CLUSTER; 75 uint32_t level = cluster->dqdt_root_level; 76 xptr_t root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] ); 77 78 // get pointers on TXT0 chdev 79 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 80 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 81 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 82 83 // get extended pointer on remote TXT0 chdev lock 84 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 85 86 // get TXT0 lock in busy waiting mode 87 remote_spinlock_lock_busy( lock_xp , &save_sr ); 88 89 // print header 90 nolock_printk("\n***** DQDT state\n\n"); 91 92 // call recursive function 93 dqdt_recursive_print( root_xp ); 94 95 // release lock 96 remote_spinlock_unlock_busy( lock_xp , save_sr ); 66 97 } 67 98 … … 161 192 } // end dqdt_init() 162 193 163 164 /////////////////////////////////////////////////////////////////////////// 165 // This recursive function is called by the dqdt_global_update() function. 194 /////////////////////////////////////////////////////////////////////////// 195 // This recursive function is called by the dqdt_update_threads() function. 166 196 // It traverses the quad tree from clusters to root. 
167 197 /////////////////////////////////////////////////////////////////////////// 168 static void dqdt_propagate( xptr_t node, // extended pointer on current node 169 int32_t threads_var, // number of threads variation 170 int32_t pages_var ) // number of pages variation 198 // @ node : extended pointer on current node 199 // @ increment : number of threads variation 200 /////////////////////////////////////////////////////////////////////////// 201 static void dqdt_propagate_threads( xptr_t node, 202 int32_t increment ) 171 203 { 172 204 // get current node cluster identifier and local pointer 173 cxy_t cxy = (cxy_t)GET_CXY( node );174 dqdt_node_t * ptr = (dqdt_node_t *)GET_PTR( node );205 cxy_t cxy = GET_CXY( node ); 206 dqdt_node_t * ptr = GET_PTR( node ); 175 207 176 208 // update current node threads number 177 hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , threads_var ); 178 179 // update current node pages number 180 hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , pages_var ); 209 hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment ); 181 210 182 211 // get extended pointer on parent node … … 184 213 185 214 // propagate if required 186 if ( parent != XPTR_NULL ) 187 { 188 dqdt_propagate( parent, threads_var, pages_var ); 189 } 190 } 191 192 ///////////////////////// 193 void dqdt_global_update() 215 if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment ); 216 } 217 218 /////////////////////////////////////////////////////////////////////////// 219 // This recursive function is called by the dqdt_update_pages() function. 220 // It traverses the quad tree from clusters to root. 221 /////////////////////////////////////////////////////////////////////////// 222 // @ node : extended pointer on current node 223 // @ increment : number of pages variation 224 /////////////////////////////////////////////////////////////////////////// 225 static void dqdt_propagate_pages( xptr_t node, 226 int32_t increment ) 227 { 228 // get current node cluster identifier and local pointer 229 cxy_t cxy = GET_CXY( node ); 230 dqdt_node_t * ptr = GET_PTR( node ); 231 232 // update current node threads number 233 hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment ); 234 235 // get extended pointer on parent node 236 xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) ); 237 238 // propagate if required 239 if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment ); 240 } 241 242 ///////////////////////////////////////////// 243 void dqdt_update_threads( int32_t increment ) 194 244 { 195 245 cluster_t * cluster = LOCAL_CLUSTER; 196 246 dqdt_node_t * node = &cluster->dqdt_tbl[0]; 197 247 198 // get variations199 int32_t threads_var = cluster->threads_var;200 int32_t pages_var = cluster->pages_var;201 202 // propagate this variation to DQDT upper levels203 if( (threads_var || pages_var) && (node->parent != XPTR_NULL) )204 {205 dqdt_propagate( node->parent, threads_var, pages_var );206 }207 208 // update variations209 hal_atomic_add( &cluster->threads_var , -threads_var );210 hal_atomic_add( &cluster->pages_var , -pages_var );211 }212 213 ///////////////////////////////////////////////////214 void dqdt_local_update_threads( int32_t increment )215 {216 cluster_t * cluster = LOCAL_CLUSTER;217 218 // register change for future propagation in DQDT219 hal_atomic_add( &cluster->threads_var , increment );220 221 248 // update DQDT node level 0 222 hal_atomic_add( &cluster->dqdt_tbl[0].threads , increment ); 223 } 224 225 
///////////////////////////////////////////////// 226 void dqdt_local_update_pages( int32_t increment ) 227 { 228 cluster_t * cluster = LOCAL_CLUSTER; 229 230 // register change for future propagation in DQDT 231 hal_atomic_add( &cluster->pages_var , increment ); 249 hal_atomic_add( &node->threads , increment ); 250 251 // propagate to DQDT upper levels 252 if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment ); 253 } 254 255 /////////////////////////////////////////// 256 void dqdt_update_pages( int32_t increment ) 257 { 258 cluster_t * cluster = LOCAL_CLUSTER; 259 dqdt_node_t * node = &cluster->dqdt_tbl[0]; 232 260 233 261 // update DQDT node level 0 234 hal_atomic_add( &cluster->dqdt_tbl[0].pages , increment ); 235 } 262 hal_atomic_add( &node->pages , increment ); 263 264 // propagate to DQDT upper levels 265 if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment ); 266 } 267 236 268 237 269 //////////////////////////////////////////////////////////////////////////////// … … 289 321 cluster_t * cluster = LOCAL_CLUSTER; 290 322 uint32_t level = cluster->dqdt_root_level; 291 xptr_t root 323 xptr_t root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] ); 292 324 293 325 // call recursive function 294 return dqdt_select_cluster( root , false );326 return dqdt_select_cluster( root_xp , false ); 295 327 } 296 328 … … 301 333 cluster_t * cluster = LOCAL_CLUSTER; 302 334 uint32_t level = cluster->dqdt_root_level; 303 xptr_t root 335 xptr_t root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] ); 304 336 305 337 // call recursive function 306 return dqdt_select_cluster( root , true );307 } 308 338 return dqdt_select_cluster( root_xp , true ); 339 } 340 -
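Splitting dqdt_propagate() into dqdt_propagate_threads() and dqdt_propagate_pages() keeps the same bottom-up recursion; only the updated field differs. A self-contained model of that walk, using plain pointers in place of extended pointers and remote atomics:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct dqdt_node_s
{
    uint32_t             level;     // 0 for leaf (cluster) nodes
    int32_t              threads;   // aggregated thread count for the subtree
    struct dqdt_node_s * parent;    // NULL at the root
} dqdt_node_t;

// walk from a leaf to the root, adding the increment at every level,
// mirroring dqdt_propagate_threads()
static void propagate_threads( dqdt_node_t * node, int32_t increment )
{
    node->threads += increment;                       // update current level
    if( node->parent != NULL )
        propagate_threads( node->parent, increment ); // recurse upward
}

int main( void )
{
    dqdt_node_t root = { 1, 0, NULL };
    dqdt_node_t leaf = { 0, 0, &root };
    propagate_threads( &leaf, +1 );                   // a thread was created
    printf( "leaf=%d root=%d\n", leaf.threads, root.threads );   // 1 1
    return 0;
}
```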
trunk/kernel/kern/dqdt.h
r437 r438	
-	/****************************************************************************************
-	 * This recursive function traverses the DQDT quad-tree from bottom to root, to propagate
-	 * the change in the threads number and allocated pages number in a leaf cluster,
-	 * toward the upper levels of the DQDT quad-tree.
-	 * It should be called periodically by each instance of the kernel.
-	 ***************************************************************************************/
-	void dqdt_global_update();
-
	/****************************************************************************************
-	 * This local function updates both the total number of threads,
-	 * in the level 0 DQDT node, and the variation of the number of threads
-	 * for future propagation to the DQDT upper levels.
+	 * This local function updates the total number of threads in level 0 DQDT node,
+	 * and propagates the variation to the DQDT upper levels.
	 * It should be called on each thread creation or destruction.
	 ****************************************************************************************
	 * @ increment : increment (can be positive or negative)
	 ***************************************************************************************/
-	void dqdt_local_update_threads( int32_t increment );
+	void dqdt_update_threads( int32_t increment );

	/****************************************************************************************
-	 * This local function updates both the total number of allocated pages,
-	 * in the level 0 DQDT node, and the variation of the number of pages
-	 * for future propagation to the DQDT upper levels.
-	 * It should be called on each memory allocation or release.
+	 * This local function updates the total number of pages in level 0 DQDT node,
+	 * and propagates the variation to the DQDT upper levels.
+	 * It should be called on each physical memory page allocation or release.
	 ****************************************************************************************
	 * @ increment : increment (can be positive or negative)
	 ***************************************************************************************/
-	void dqdt_local_update_pages( int32_t increment );
+	void dqdt_update_pages( int32_t increment );
…
	/****************************************************************************************
-	 * This recursive function displays usage information for all DQDT nodes in the subtree
-	 * defined by the node argument. It traverses the quadtree from root to bottom.
-	 ****************************************************************************************
-	 * @ node_xp : extended pointer on a DQDT node.
+	 * This function displays on kernel TXT0 the DQDT state for all nodes in the quad-tree.
+	 * It traverses the quadtree from root to bottom, and can be called by a thread
+	 * running in any cluster
	 ***************************************************************************************/
-	void dqdt_global_print( xptr_t node_xp );
-
-	/****************************************************************************************
-	 * This function displays summary usage information in a given DQDT local node.
-	 ****************************************************************************************
-	 * @ node : local pointer on a DQDT node.
-	 ***************************************************************************************/
-	void dqdt_local_print( dqdt_node_t * node );
+	void dqdt_display();
-
trunk/kernel/kern/kernel_init.c
r437 r438 125 125 // these debug variables are used to analyse the sys_read() syscall timing 126 126 127 #if CONFIG_DEBUG_SYS_READ127 #if DEBUG_SYS_READ 128 128 uint32_t enter_sys_read; 129 129 uint32_t exit_sys_read; … … 150 150 // these debug variables are used to analyse the sys_write() syscall timing 151 151 152 #if CONFIG_DEBUG_SYS_WRITE152 #if DEBUG_SYS_WRITE 153 153 uint32_t enter_sys_write; 154 154 uint32_t exit_sys_write; … … 324 324 } 325 325 326 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )327 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )326 #if( DEBUG_KERNEL_INIT & 0x1 ) 327 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 328 328 printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n", 329 329 __FUNCTION__ , local_cxy , chdev_ptr ); … … 353 353 chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr ); 354 354 355 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )356 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )355 #if( DEBUG_KERNEL_INIT & 0x1 ) 356 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 357 357 printk("\n[DBG] %s : created DMA[%d] in cluster %x / chdev = %x\n", 358 358 __FUNCTION__ , channel , local_cxy , chdev_ptr ); … … 488 488 } 489 489 490 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )491 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )490 #if( DEBUG_KERNEL_INIT & 0x1 ) 491 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 492 492 printk("\n[DBG] %s : create chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n", 493 493 __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev ); … … 623 623 } 624 624 625 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )626 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )625 #if( DEBUG_KERNEL_INIT & 0x1 ) 626 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 627 627 { 628 628 printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n", … … 807 807 ///////////////////////////////////////////////////////////////////////////////// 808 808 809 #if CONFIG_DEBUG_KERNEL_INIT810 if( (core_lid == 0) & &(local_cxy == 0) )809 #if DEBUG_KERNEL_INIT 810 if( (core_lid == 0) & (local_cxy == 0) ) 811 811 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n", 812 812 __FUNCTION__, (uint32_t)hal_get_cycles() ); … … 845 845 ///////////////////////////////////////////////////////////////////////////////// 846 846 847 #if CONFIG_DEBUG_KERNEL_INIT848 if( (core_lid == 0) & &(local_cxy == 0) )847 #if DEBUG_KERNEL_INIT 848 if( (core_lid == 0) & (local_cxy == 0) ) 849 849 printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n", 850 850 __FUNCTION__, (uint32_t)hal_get_cycles() ); … … 872 872 //////////////////////////////////////////////////////////////////////////////// 873 873 874 #if CONFIG_DEBUG_KERNEL_INIT875 if( (core_lid == 0) & &(local_cxy == 0) )874 #if DEBUG_KERNEL_INIT 875 if( (core_lid == 0) & (local_cxy == 0) ) 876 876 printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n", 877 877 __FUNCTION__, (uint32_t)hal_get_cycles() ); … … 905 905 ///////////////////////////////////////////////////////////////////////////////// 906 906 907 #if CONFIG_DEBUG_KERNEL_INIT908 if( (core_lid == 0) & &(local_cxy == 0) )907 #if DEBUG_KERNEL_INIT 908 if( (core_lid == 0) & (local_cxy == 0) ) 909 909 printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n", 910 910 __FUNCTION__, (uint32_t)hal_get_cycles() ); 911 911 #endif 912 912 913 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )913 #if( DEBUG_KERNEL_INIT & 1 ) 914 914 chdev_dir_display(); 915 915 #endif … … 927 927 928 928 // all cores initialize the idle thread descriptor 929 error = 
thread_ kernel_init( thread,930 931 932 933 929 error = thread_idle_init( thread, 930 THREAD_IDLE, 931 &thread_idle_func, 932 NULL, 933 core_lid ); 934 934 if( error ) 935 935 { … … 942 942 core->scheduler.idle = thread; 943 943 944 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )944 #if( DEBUG_KERNEL_INIT & 1 ) 945 945 sched_display( core_lid ); 946 946 #endif … … 1014 1014 ///////////////////////////////////////////////////////////////////////////////// 1015 1015 1016 #if CONFIG_DEBUG_KERNEL_INIT1017 if( (core_lid == 0) & &(local_cxy == 0) )1016 #if DEBUG_KERNEL_INIT 1017 if( (core_lid == 0) & (local_cxy == 0) ) 1018 1018 printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n", 1019 1019 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles()); … … 1075 1075 ///////////////////////////////////////////////////////////////////////////////// 1076 1076 1077 #if CONFIG_DEBUG_KERNEL_INIT1078 if( (core_lid == 0) & & (local_cxy == io_cxy) )1077 #if DEBUG_KERNEL_INIT 1078 if( (core_lid == 0) & (local_cxy == 0) ) 1079 1079 printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster %x / cycle %d\n", 1080 1080 __FUNCTION__, vfs_root_inode_xp , io_cxy , (uint32_t)hal_get_cycles()); … … 1110 1110 ///////////////////////////////////////////////////////////////////////////////// 1111 1111 1112 #if CONFIG_DEBUG_KERNEL_INIT1113 if( (core_lid == 0) & & (local_cxy == io_cxy) )1112 #if DEBUG_KERNEL_INIT 1113 if( (core_lid == 0) & (local_cxy == 0) ) 1114 1114 printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster %x / cycle %d\n", 1115 1115 __FUNCTION__, devfs_dev_inode_xp , io_cxy , (uint32_t)hal_get_cycles() ); … … 1149 1149 ///////////////////////////////////////////////////////////////////////////////// 1150 1150 1151 #if CONFIG_DEBUG_KERNEL_INIT1152 if( (core_lid == 0) & &(local_cxy == 0) )1151 #if DEBUG_KERNEL_INIT 1152 if( (core_lid == 0) & (local_cxy == 0) ) 1153 1153 printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n", 1154 1154 __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() ); … … 1162 1162 { 1163 1163 1164 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )1164 #if( DEBUG_KERNEL_INIT & 1 ) 1165 1165 vfs_display( vfs_root_inode_xp ); 1166 1166 #endif … … 1175 1175 ///////////////////////////////////////////////////////////////////////////////// 1176 1176 1177 #if CONFIG_DEBUG_KERNEL_INIT1178 if( (core_lid == 0) & &(local_cxy == 0) )1177 #if DEBUG_KERNEL_INIT 1178 if( (core_lid == 0) & (local_cxy == 0) ) 1179 1179 printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n", 1180 1180 __FUNCTION__ , (uint32_t)hal_get_cycles() ); … … 1189 1189 print_banner( (info->x_size * info->y_size) , info->cores_nr ); 1190 1190 1191 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )1191 #if( DEBUG_KERNEL_INIT & 1 ) 1192 1192 printk("\n\n***** memory fooprint for main kernel objects\n\n" 1193 1193 " - thread descriptor : %d bytes\n" -
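The init sequence is a series of global barriers, each followed by a single-core trace. A sketch of one such synchronisation point, with a hypothetical barrier stub; note that the guards rewritten here from `&&` to bitwise `&` stay correct because both operands are 0/1 comparison results:

```c
#include <stdint.h>
#include <stdio.h>

#define DEBUG_KERNEL_INIT 1              // stand-in threshold for the sketch

static void xbarrier_wait( void ) { }    // hypothetical global barrier stub

static uint32_t cycles;                  // stand-in for hal_get_cycles()

// one synchronisation point of the init sequence, executed by every core;
// after the barrier, exactly one core (lid 0 of cluster 0) reports the phase
static void init_phase_done( uint32_t cxy, uint32_t lid, const char * what )
{
    xbarrier_wait();                     // no core proceeds before all arrive

#if DEBUG_KERNEL_INIT
    if( (lid == 0) & (cxy == 0) )        // bitwise & is safe: both sides are 0/1
        printf( "\n[DBG] exit barrier : %s / cycle %u\n", what, ++cycles );
#endif
}

int main( void )
{
    init_phase_done( 0, 0, "TXT0 initialized" );   // prints
    init_phase_done( 0, 1, "TXT0 initialized" );   // silent on other cores
    return 0;
}
```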
trunk/kernel/kern/process.c
r437 r438 124 124 model_pid = hal_remote_lw( XPTR( model_cxy , &model_ptr->pid ) ); 125 125 126 #if CONFIG_DEBUG_PROCESS_REFERENCE_INIT126 #if DEBUG_PROCESS_REFERENCE_INIT 127 127 uint32_t cycle = (uint32_t)hal_get_cycles(); 128 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )128 if( DEBUG_PROCESS_REFERENCE_INIT ) 129 129 printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n", 130 130 __FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle ); … … 141 141 assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" ); 142 142 143 #if ( CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)144 cycle = (uint32_t)hal_get_cycles(); 145 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )143 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 144 cycle = (uint32_t)hal_get_cycles(); 145 if( DEBUG_PROCESS_REFERENCE_INIT ) 146 146 printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n", 147 147 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); … … 232 232 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 233 233 234 #if ( CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)235 cycle = (uint32_t)hal_get_cycles(); 236 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )234 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 235 cycle = (uint32_t)hal_get_cycles(); 236 if( DEBUG_PROCESS_REFERENCE_INIT ) 237 237 printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n", 238 238 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); … … 272 272 hal_fence(); 273 273 274 #if ( CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)275 cycle = (uint32_t)hal_get_cycles(); 276 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )274 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 275 cycle = (uint32_t)hal_get_cycles(); 276 if( DEBUG_PROCESS_REFERENCE_INIT ) 277 277 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 278 278 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); … … 297 297 local_process->term_state = 0; 298 298 299 #if CONFIG_DEBUG_PROCESS_COPY_INIT299 #if DEBUG_PROCESS_COPY_INIT 300 300 uint32_t cycle = (uint32_t)hal_get_cycles(); 301 if( CONFIG_DEBUG_PROCESS_COPY_INIT )301 if( DEBUG_PROCESS_COPY_INIT ) 302 302 printk("\n[DBG] %s : thread %x enter for process %x\n", 303 303 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); … … 347 347 hal_fence(); 348 348 349 #if CONFIG_DEBUG_PROCESS_COPY_INIT350 cycle = (uint32_t)hal_get_cycles(); 351 if( CONFIG_DEBUG_PROCESS_COPY_INIT )349 #if DEBUG_PROCESS_COPY_INIT 350 cycle = (uint32_t)hal_get_cycles(); 351 if( DEBUG_PROCESS_COPY_INIT ) 352 352 printk("\n[DBG] %s : thread %x exit for process %x\n", 353 353 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); … … 371 371 "process %x in cluster %x has still active threads", pid , local_cxy ); 372 372 373 #if CONFIG_DEBUG_PROCESS_DESTROY373 #if DEBUG_PROCESS_DESTROY 374 374 uint32_t cycle = (uint32_t)hal_get_cycles(); 375 if( CONFIG_DEBUG_PROCESS_DESTROY )375 if( DEBUG_PROCESS_DESTROY ) 376 376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n", 377 377 __FUNCTION__ , CURRENT_THREAD , process, pid , cycle ); 378 #endif379 380 #if CONFIG_DEBUG_PROCESS_DESTROY381 if( CONFIG_DEBUG_PROCESS_DESTROY & 1 )382 cluster_processes_display( CXY_FROM_PID( pid ) );383 378 #endif 384 379 … … 422 417 process_free( process ); 423 418 424 #if CONFIG_DEBUG_PROCESS_DESTROY425 cycle = (uint32_t)hal_get_cycles(); 426 if( CONFIG_DEBUG_PROCESS_DESTROY )419 #if DEBUG_PROCESS_DESTROY 420 cycle = (uint32_t)hal_get_cycles(); 421 if( DEBUG_PROCESS_DESTROY ) 427 422 printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid 
= %x) / cycle %d\n", 428 423 __FUNCTION__ , CURRENT_THREAD , process, pid, cycle ); … … 457 452 thread_t * client = CURRENT_THREAD; 458 453 459 #if CONFIG_DEBUG_PROCESS_SIGACTION454 #if DEBUG_PROCESS_SIGACTION 460 455 uint32_t cycle = (uint32_t)hal_get_cycles(); 461 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )456 if( DEBUG_PROCESS_SIGACTION < cycle ) 462 457 printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n", 463 458 __FUNCTION__ , client, process_action_str( action_type ) , pid , cycle ); … … 483 478 // it can be shared because all parallel, non-blocking, server threads 484 479 // use the same input arguments, and use the shared RPC response field 485 // but use486 480 487 481 // the client thread makes the following sequence: … … 502 496 503 497 // initialize shared RPC descriptor 504 rpc.response = 0;505 rpc.blocking = false;506 rpc.index = RPC_PROCESS_SIGACTION;507 rpc.thread = client;508 rpc.lid = client->core->lid;509 rpc.args[0] = action_type;510 rpc.args[1] = pid;498 rpc.responses = 0; 499 rpc.blocking = false; 500 rpc.index = RPC_PROCESS_SIGACTION; 501 rpc.thread = client; 502 rpc.lid = client->core->lid; 503 rpc.args[0] = action_type; 504 rpc.args[1] = pid; 511 505 512 506 // send RPCs to all clusters containing process copiess … … 514 508 { 515 509 516 #if CONFIG_DEBUG_PROCESS_SIGACTION517 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )510 #if DEBUG_PROCESS_SIGACTION 511 if( DEBUG_PROCESS_SIGACTION < cycle ) 518 512 printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n", 519 513 __FUNCTION__ , process_action_str( action_type ) , pid , process_cxy ); 520 514 #endif 521 515 // atomically increment responses counter 522 hal_atomic_add( (void *)&rpc.response , 1 );516 hal_atomic_add( (void *)&rpc.responses , 1 ); 523 517 524 518 process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); … … 538 532 sched_yield("blocked on rpc_process_sigaction"); 539 533 540 #if CONFIG_DEBUG_PROCESS_SIGACTION541 cycle = (uint32_t)hal_get_cycles(); 542 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )534 #if DEBUG_PROCESS_SIGACTION 535 cycle = (uint32_t)hal_get_cycles(); 536 if( DEBUG_PROCESS_SIGACTION < cycle ) 543 537 printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n", 544 538 __FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle ); … … 563 557 owner_cxy = CXY_FROM_PID( process->pid ); 564 558 565 #if CONFIG_DEBUG_PROCESS_SIGACTION559 #if DEBUG_PROCESS_SIGACTION 566 560 uint32_t cycle = (uint32_t)hal_get_cycles(); 567 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )561 if( DEBUG_PROCESS_SIGACTION < cycle ) 568 562 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 569 563 __FUNCTION__ , this , process->pid , local_cxy , cycle ); … … 623 617 } 624 618 625 #if CONFIG_DEBUG_PROCESS_SIGACTION626 cycle = (uint32_t)hal_get_cycles(); 627 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )619 #if DEBUG_PROCESS_SIGACTION 620 cycle = (uint32_t)hal_get_cycles(); 621 if( DEBUG_PROCESS_SIGACTION < cycle ) 628 622 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 629 623 __FUNCTION__ , this , process->pid , local_cxy , cycle ); … … 643 637 this = CURRENT_THREAD; 644 638 645 #if CONFIG_DEBUG_PROCESS_SIGACTION639 #if DEBUG_PROCESS_SIGACTION 646 640 uint32_t cycle = (uint32_t)hal_get_cycles(); 647 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )641 if( DEBUG_PROCESS_SIGACTION < cycle ) 648 642 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 649 643 
__FUNCTION__ , this , process->pid , local_cxy , cycle ); … … 671 665 spinlock_unlock( &process->th_lock ); 672 666 673 #if CONFIG_DEBUG_PROCESS_SIGACTION674 cycle = (uint32_t)hal_get_cycles(); 675 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )667 #if DEBUG_PROCESS_SIGACTION 668 cycle = (uint32_t)hal_get_cycles(); 669 if( DEBUG_PROCESS_SIGACTION < cycle ) 676 670 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 677 671 __FUNCTION__ , this , process->pid , local_cxy , cycle ); … … 687 681 uint32_t count; // threads counter 688 682 689 #if CONFIG_DEBUG_PROCESS_SIGACTION683 #if DEBUG_PROCESS_SIGACTION 690 684 uint32_t cycle = (uint32_t)hal_get_cycles(); 691 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )685 if( DEBUG_PROCESS_SIGACTION < cycle ) 692 686 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 693 687 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle ); … … 716 710 spinlock_unlock( &process->th_lock ); 717 711 718 #if CONFIG_DEBUG_PROCESS_SIGACTION719 cycle = (uint32_t)hal_get_cycles(); 720 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )712 #if DEBUG_PROCESS_SIGACTION 713 cycle = (uint32_t)hal_get_cycles(); 714 if( DEBUG_PROCESS_SIGACTION < cycle ) 721 715 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 722 716 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle ); … … 1036 1030 vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp)); 1037 1031 1038 // check parent process is the reference 1032 // check parent process is the reference process 1039 1033 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 1034 1035 printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n", 1036 __FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) ); 1037 1040 1038 assert( (parent_process_xp == ref_xp ) , __FUNCTION__ , 1041 1039 "parent process must be the reference process\n" ); 1042 1040 1043 #if CONFIG_DEBUG_PROCESS_MAKE_FORK1041 #if DEBUG_PROCESS_MAKE_FORK 1044 1042 uint32_t cycle = (uint32_t)hal_get_cycles(); 1045 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )1046 printk("\n[DBG] %s : thread %x enter for process %x / c ycle %d\n",1047 __FUNCTION__, CURRENT_THREAD, parent_pid, cycle );1043 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1044 printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n", 1045 __FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle ); 1048 1046 #endif 1049 1047 … … 1073 1071 parent_process_xp ); 1074 1072 1075 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )1076 cycle = (uint32_t)hal_get_cycles(); 1077 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )1073 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1074 cycle = (uint32_t)hal_get_cycles(); 1075 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1078 1076 printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n", 1079 1077 __FUNCTION__, CURRENT_THREAD, process, new_pid, cycle ); … … 1092 1090 } 1093 1091 1094 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )1095 cycle = (uint32_t)hal_get_cycles(); 1096 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )1092 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1093 cycle = (uint32_t)hal_get_cycles(); 1094 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1097 1095 printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n", 1098 1096 __FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle ); … … 1115 1113 } 1116 1114 1117 // check main thread 
index 1118 assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1119 1120 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 ) 1121 cycle = (uint32_t)hal_get_cycles(); 1122 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1115 // check main thread LTID 1116 assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ , 1117 "main thread must have LTID == 0\n" ); 1118 1119 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1120 cycle = (uint32_t)hal_get_cycles(); 1121 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1123 1122 printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n", 1124 1123 __FUNCTION__ , CURRENT_THREAD, thread, cycle ); … … 1140 1139 vmm_set_cow( process ); 1141 1140 1142 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )1143 cycle = (uint32_t)hal_get_cycles(); 1144 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )1141 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1142 cycle = (uint32_t)hal_get_cycles(); 1143 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1145 1144 printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n", 1146 1145 __FUNCTION__ , CURRENT_THREAD, cycle ); … … 1162 1161 *child_pid = new_pid; 1163 1162 1164 #if CONFIG_DEBUG_PROCESS_MAKE_FORK1165 cycle = (uint32_t)hal_get_cycles(); 1166 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )1163 #if DEBUG_PROCESS_MAKE_FORK 1164 cycle = (uint32_t)hal_get_cycles(); 1165 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1167 1166 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1168 1167 __FUNCTION__, CURRENT_THREAD, cycle ); … … 1205 1204 "must be called by the main thread\n" ); 1206 1205 1207 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC1206 #if DEBUG_PROCESS_MAKE_EXEC 1208 1207 uint32_t cycle = (uint32_t)hal_get_cycles(); 1209 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )1208 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1210 1209 printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n", 1211 1210 __FUNCTION__, old_thread, pid, path, cycle ); … … 1244 1243 process_txt_set_ownership( XPTR( local_cxy , new_process) ); 1245 1244 1246 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )1247 cycle = (uint32_t)hal_get_cycles(); 1248 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )1245 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1246 cycle = (uint32_t)hal_get_cycles(); 1247 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1249 1248 printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n", 1250 1249 __FUNCTION__ , old_thread , new_process , cycle ); … … 1261 1260 } 1262 1261 1263 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )1264 cycle = (uint32_t)hal_get_cycles(); 1265 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )1262 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1263 cycle = (uint32_t)hal_get_cycles(); 1264 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1266 1265 printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n", 1267 1266 __FUNCTION__, old_thread , new_process->pid , cycle ); … … 1290 1289 } 1291 1290 1292 // check main thread index 1293 assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1294 1295 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 ) 1296 cycle = (uint32_t)hal_get_cycles(); 1297 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1291 // check main thread LTID 1292 assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ , 1293 "main thread must have LTID == 0\n" ); 1294 1295 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1296 cycle = (uint32_t)hal_get_cycles(); 1297 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1298 1298 printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n", 1299 1299 __FUNCTION__ , old_thread , 
new_thread , cycle ); … … 1327 1327 hal_fence(); 1328 1328 1329 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC1330 cycle = (uint32_t)hal_get_cycles(); 1331 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )1329 #if DEBUG_PROCESS_MAKE_EXEC 1330 cycle = (uint32_t)hal_get_cycles(); 1331 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1332 1332 printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n", 1333 1333 __FUNCTION__ , old_thread , new_thread , cycle ); … … 1342 1342 { 1343 1343 1344 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE1344 #if DEBUG_PROCESS_ZERO_CREATE 1345 1345 uint32_t cycle = (uint32_t)hal_get_cycles(); 1346 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )1346 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1347 1347 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1348 1348 #endif … … 1370 1370 hal_fence(); 1371 1371 1372 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE1373 cycle = (uint32_t)hal_get_cycles(); 1374 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )1372 #if DEBUG_PROCESS_ZERO_CREATE 1373 cycle = (uint32_t)hal_get_cycles(); 1374 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1375 1375 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1376 1376 #endif … … 1388 1388 error_t error; 1389 1389 1390 #if CONFIG_DEBUG_PROCESS_INIT_CREATE1390 #if DEBUG_PROCESS_INIT_CREATE 1391 1391 uint32_t cycle = (uint32_t)hal_get_cycles(); 1392 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )1392 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1393 1393 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1394 1394 #endif … … 1468 1468 hal_fence(); 1469 1469 1470 #if CONFIG_DEBUG_PROCESS_INIT_CREATE1471 cycle = (uint32_t)hal_get_cycles(); 1472 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )1470 #if DEBUG_PROCESS_INIT_CREATE 1471 cycle = (uint32_t)hal_get_cycles(); 1472 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1473 1473 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1474 1474 #endif … … 1605 1605 xptr_t lock_xp; // extended pointer on list lock in chdev 1606 1606 1607 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1607 #if DEBUG_PROCESS_TXT_ATTACH 1608 1608 uint32_t cycle = (uint32_t)hal_get_cycles(); 1609 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1609 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1610 1610 printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d / cycle %d\n", 1611 1611 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle ); … … 1634 1634 remote_spinlock_unlock( lock_xp ); 1635 1635 1636 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1637 cycle = (uint32_t)hal_get_cycles(); 1638 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1636 #if DEBUG_PROCESS_TXT_ATTACH 1637 cycle = (uint32_t)hal_get_cycles(); 1638 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1639 1639 printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n", 1640 1640 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle ); … … 1664 1664 "process descriptor not in owner cluster" ); 1665 1665 1666 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1666 #if DEBUG_PROCESS_TXT_ATTACH 1667 1667 uint32_t cycle = (uint32_t)hal_get_cycles(); 1668 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1668 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1669 1669 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1670 1670 __FUNCTION__, CURRENT_THREAD, process_pid, cycle ); … … 1690 1690 remote_spinlock_unlock( lock_xp ); 1691 1691 1692 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )1693 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle 
)1692 #if( DEBUG_PROCESS_TXT_ATTACH & 1 ) 1693 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1694 1694 { 1695 1695 xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root ); … … 1706 1706 #endif 1707 1707 1708 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1709 cycle = (uint32_t)hal_get_cycles(); 1710 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1708 #if DEBUG_PROCESS_TXT_ATTACH 1709 cycle = (uint32_t)hal_get_cycles(); 1710 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1711 1711 printk("\n[DBG] %s : thread %x exit / process %x detached from TXT / cycle %d\n", 1712 1712 __FUNCTION__, CURRENT_THREAD, process->pid, cycle ); … … 1737 1737 "process descriptor not in owner cluster\n" ); 1738 1738 1739 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1739 #if DEBUG_PROCESS_TXT_ATTACH 1740 1740 uint32_t cycle = (uint32_t)hal_get_cycles(); 1741 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1741 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1742 1742 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1743 1743 __FUNCTION__, CURRENT_THREAD, process_pid, cycle ); … … 1755 1755 hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp ); 1756 1756 1757 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1758 cycle = (uint32_t)hal_get_cycles(); 1759 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1757 #if DEBUG_PROCESS_TXT_ATTACH 1758 cycle = (uint32_t)hal_get_cycles(); 1759 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1760 1760 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 1761 1761 __FUNCTION__, CURRENT_THREAD, process_pid, cycle ); … … 1794 1794 "process descriptor not in owner cluster\n" ); 1795 1795 1796 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1796 #if DEBUG_PROCESS_TXT_ATTACH 1797 1797 uint32_t cycle = (uint32_t)hal_get_cycles(); 1798 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1798 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1799 1799 printk("\n[DBG] %s : thread %x enter / process %x / pid %x / cycle %d\n", 1800 1800 __FUNCTION__, CURRENT_THREAD, process_ptr, process_pid, cycle ); … … 1813 1813 txt_id = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) ); 1814 1814 1815 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )1816 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1815 #if( DEBUG_PROCESS_TXT_ATTACH & 1 ) 1816 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1817 1817 printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n", 1818 1818 __FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) ); … … 1832 1832 { 1833 1833 1834 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )1835 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1834 #if( DEBUG_PROCESS_TXT_ATTACH & 1 ) 1835 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1836 1836 printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ ); 1837 1837 #endif … … 1851 1851 hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 1852 1852 1853 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1854 cycle = (uint32_t)hal_get_cycles(); 1855 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1853 #if DEBUG_PROCESS_TXT_ATTACH 1854 cycle = (uint32_t)hal_get_cycles(); 1855 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1856 1856 printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n", 1857 1857 __FUNCTION__, CURRENT_THREAD, process_pid, … … 1873 1873 { 1874 1874 1875 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )1876 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1875 #if( DEBUG_PROCESS_TXT_ATTACH & 1 ) 1876 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1877 1877 printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ ); 1878 
1878 #endif … … 1893 1893 hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 1894 1894 1895 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1896 cycle = (uint32_t)hal_get_cycles(); 1897 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1895 #if DEBUG_PROCESS_TXT_ATTACH 1896 cycle = (uint32_t)hal_get_cycles(); 1897 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1898 1898 printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n", 1899 1899 __FUNCTION__, CURRENT_THREAD, process_pid, … … 1910 1910 hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL ); 1911 1911 1912 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1913 cycle = (uint32_t)hal_get_cycles(); 1914 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1912 #if DEBUG_PROCESS_TXT_ATTACH 1913 cycle = (uint32_t)hal_get_cycles(); 1914 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1915 1915 printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n", 1916 1916 __FUNCTION__, CURRENT_THREAD, process_pid, cycle ); … … 1922 1922 { 1923 1923 1924 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH1925 cycle = (uint32_t)hal_get_cycles(); 1926 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )1924 #if DEBUG_PROCESS_TXT_ATTACH 1925 cycle = (uint32_t)hal_get_cycles(); 1926 if( DEBUG_PROCESS_TXT_ATTACH < cycle ) 1927 1927 printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n", 1928 1928 __FUNCTION__, CURRENT_THREAD, process_pid, cycle ); -
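process_sigaction() shows the broadcast use of the renamed `responses` counter: one shared descriptor on the client stack, one increment per target cluster, one decrement per server, and a client blocked until the counter returns to zero. A compact model using C11 atomics in place of hal_remote_atomic_add() and the scheduler:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

// Shared descriptor, as in process_sigaction(): one instance on the client
// stack, referenced by every server through an extended pointer.
typedef struct rpc_desc_s
{
    atomic_int responses;    // pending servers; the client waits for zero
    uint32_t   index;        // service index, e.g. RPC_PROCESS_SIGACTION
    uint32_t   args[2];      // action type, target pid
} rpc_desc_t;

// client side: register one expected answer per target cluster
static void post_to_target( rpc_desc_t * rpc )
{
    atomic_fetch_add( &rpc->responses, 1 );
    /* enqueue XPTR(local_cxy, rpc) in the target RPC FIFO + send IPI (elided) */
}

// server side: executed once in each target cluster
static void serve( rpc_desc_t * rpc )
{
    /* perform the requested action (elided) */
    atomic_fetch_add( &rpc->responses, -1 );   // then unblock client + IPI
}

int main( void )
{
    rpc_desc_t rpc = { .responses = 0, .index = 0, .args = { 0, 0 } };

    post_to_target( &rpc );                    // two clusters hold a copy
    post_to_target( &rpc );

    serve( &rpc );                             // normally runs remotely
    serve( &rpc );

    // in the kernel the client blocks on THREAD_BLOCKED_RPC and is
    // unblocked by the last decrement; here the counter is already zero
    printf( "pending = %d\n", atomic_load( &rpc.responses ) );
    return 0;
}
```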
trunk/kernel/kern/rpc.c
r437 r438 43 43 44 44 ///////////////////////////////////////////////////////////////////////////////////////// 45 // Debug macros for marshalling functions46 /////////////////////////////////////////////////////////////////////////////////////////47 48 #if CONFIG_DEBUG_RPC_MARSHALING49 50 #define RPC_DEBUG_ENTER \51 uint32_t cycle = (uint32_t)hal_get_cycles(); \52 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \53 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n", \54 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );55 56 #define RPC_DEBUG_EXIT \57 cycle = (uint32_t)hal_get_cycles(); \58 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \59 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n", \60 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );61 62 #else63 64 #define RPC_DEBUG_ENTER65 66 #define RPC_DEBUG_EXIT67 68 #endif69 70 /////////////////////////////////////////////////////////////////////////////////////////71 45 // array of function pointers (must be consistent with enum in rpc.h) 72 46 ///////////////////////////////////////////////////////////////////////////////////////// … … 122 96 rpc_desc_t * rpc ) 123 97 { 124 volatile error_t full = 0; 125 thread_t * this = CURRENT_THREAD; 126 core_t * core = this->core; 127 128 #if CONFIG_DEBUG_RPC_SEND 98 lid_t server_core_lid; 99 lid_t client_core_lid; 100 volatile error_t full; 101 thread_t * this; 102 cluster_t * cluster; 103 104 #if DEBUG_RPC_CLIENT_GENERIC 129 105 uint32_t cycle = (uint32_t)hal_get_cycles(); 130 if( CONFIG_DEBUG_RPC_SEND< cycle )106 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 131 107 printk("\n[DBG] %s : thread %x in cluster %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n", 132 108 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, rpc, cycle ); 133 109 #endif 134 110 135 // register client thread pointer and core lid in RPC descriptor 111 full = 0; 112 this = CURRENT_THREAD; 113 cluster = LOCAL_CLUSTER; 114 client_core_lid = this->core->lid; 115 116 // select a server_core index: 117 // use client core index if possible / core 0 otherwise 118 if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) ) 119 { 120 server_core_lid = client_core_lid; 121 } 122 else 123 { 124 server_core_lid = 0; 125 } 126 127 // register client_thread pointer and client_core lid in RPC descriptor 136 128 rpc->thread = this; 137 rpc->lid = c ore->lid;138 139 // build anextended pointer on the RPC descriptor129 rpc->lid = client_core_lid; 130 131 // build extended pointer on the RPC descriptor 140 132 xptr_t desc_xp = XPTR( local_cxy , rpc ); 141 133 … … 160 152 hal_fence(); 161 153 162 // send IPI to the remote core corresponding to the clientcore163 dev_pic_send_ipi( server_cxy , core->lid );154 // send IPI to the selected server core 155 dev_pic_send_ipi( server_cxy , server_core_lid ); 164 156 165 157 // wait RPC completion before returning if blocking RPC … … 171 163 { 172 164 173 #if CONFIG_DEBUG_RPC_SEND174 cycle = (uint32_t)hal_get_cycles(); 175 if( CONFIG_DEBUG_RPC_SEND< cycle )165 #if DEBUG_RPC_CLIENT_GENERIC 166 cycle = (uint32_t)hal_get_cycles(); 167 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 176 168 printk("\n[DBG] %s : thread %x in cluster %x busy waiting / rpc[%d] / cycle %d\n", 177 169 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle ); 178 170 #endif 179 171 180 while( rpc->response ) hal_fixed_delay( 100 );172 while( rpc->responses ) hal_fixed_delay( 100 ); 181 173 182 #if CONFIG_DEBUG_RPC_SEND183 cycle = 
(uint32_t)hal_get_cycles(); 184 if( CONFIG_DEBUG_RPC_SEND< cycle )185 printk("\n[DBG] %s : thread % in cluster %x resume/ rpc[%d] / cycle %d\n",174 #if DEBUG_RPC_CLIENT_GENERIC 175 cycle = (uint32_t)hal_get_cycles(); 176 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 177 printk("\n[DBG] %s : thread %x in cluster %x resumes / rpc[%d] / cycle %d\n", 186 178 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle ); 187 179 #endif … … 190 182 { 191 183 192 #if CONFIG_DEBUG_RPC_SEND193 cycle = (uint32_t)hal_get_cycles(); 194 if( CONFIG_DEBUG_RPC_SEND< cycle )195 printk("\n[DBG] %s : thread %x in cluster %x deschedule/ rpc[%d] / cycle %d\n",184 #if DEBUG_RPC_CLIENT_GENERIC 185 cycle = (uint32_t)hal_get_cycles(); 186 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 187 printk("\n[DBG] %s : thread %x in cluster %x blocks & deschedules / rpc[%d] / cycle %d\n", 196 188 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle ); 197 189 #endif … … 199 191 sched_yield("blocked on RPC"); 200 192 201 #if CONFIG_DEBUG_RPC_SEND202 cycle = (uint32_t)hal_get_cycles(); 203 if( CONFIG_DEBUG_RPC_SEND< cycle )204 printk("\n[DBG] %s : thread % in cluster %x resume/ rpcr[%d] / cycle %d\n",193 #if DEBUG_RPC_CLIENT_GENERIC 194 cycle = (uint32_t)hal_get_cycles(); 195 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 196 printk("\n[DBG] %s : thread %x in cluster %x resumes / rpcr[%d] / cycle %d\n", 205 197 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle ); 206 198 #endif … … 208 200 209 201 // check response available 210 assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" ); 211 212 // acknowledge the IPI sent by the server 213 dev_pic_ack_ipi(); 202 assert( (rpc->responses == 0) , __FUNCTION__, "illegal RPC response\n" ); 214 203 } 215 else 204 else // non blocking RPC 216 205 { 217 206 218 #if CONFIG_DEBUG_RPC_SEND219 cycle = (uint32_t)hal_get_cycles(); 220 if( CONFIG_DEBUG_RPC_SEND< cycle )207 #if DEBUG_RPC_CLIENT_GENERIC 208 cycle = (uint32_t)hal_get_cycles(); 209 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 221 210 printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return / cycle %d\n", 222 211 __FUNCTION__, rpc->index, CURRENT_THREAD, cycle ); … … 244 233 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 245 234 246 #if CONFIG_DEBUG_RPC_SERVER235 #if DEBUG_RPC_SERVER_GENERIC 247 236 uint32_t cycle = (uint32_t)hal_get_cycles(); 248 if( CONFIG_DEBUG_RPC_SERVER< cycle )237 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 249 238 printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n", 250 239 __FUNCTION__, this, local_cxy, cycle ); … … 254 243 hal_disable_irq( &sr_save ); 255 244 256 // check RPC FIFO not empty and no RPC thread handling it245 // activate (or create) RPC thread if RPC FIFO not empty 257 246 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 258 247 { 259 // search one non blocked RPC thread 248 249 #if DEBUG_RPC_SERVER_GENERIC 250 cycle = (uint32_t)hal_get_cycles(); 251 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 252 printk("\n[DBG] %s : RPC FIFO non empty in cluster %x / cycle %d\n", 253 __FUNCTION__, local_cxy, cycle ); 254 #endif 255 256 // search one IDLE RPC thread 260 257 list_entry_t * iter; 261 258 LIST_FOREACH( &sched->k_root , iter ) 262 259 { 263 260 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 264 if( (thread->type == THREAD_RPC) && (thread->blocked == 0) )261 if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) ) 265 262 { 263 // unblock found RPC thread 264 thread_unblock( XPTR( local_cxy , thread ) , 
THREAD_BLOCKED_IDLE ); 265 266 // exit loop 266 267 found = true; 267 268 break; … … 279 280 if( error ) 280 281 { 281 printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",282 __FUNCTION__, local_cxy );282 assert( false , __FUNCTION__ , 283 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 283 284 } 284 else 285 { 286 // unblock created RPC thread 287 thread->blocked = 0; 288 289 // update core descriptor counter 290 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 291 292 #if CONFIG_DEBUG_RPC_SERVER 293 cycle = (uint32_t)hal_get_cycles(); 294 if( CONFIG_DEBUG_RPC_SERVER < cycle ) 285 286 // unblock created RPC thread 287 thread->blocked = 0; 288 289 // update core descriptor counter 290 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 291 292 #if DEBUG_RPC_SERVER_GENERIC 293 cycle = (uint32_t)hal_get_cycles(); 294 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 295 295 printk("\n[DBG] %s : create a new RPC thread %x in cluster %x / cycle %d\n", 296 296 __FUNCTION__, thread, local_cxy, cycle ); 297 297 #endif 298 }299 298 } 300 299 } 301 300 302 #if CONFIG_DEBUG_RPC_SERVER303 cycle = (uint32_t)hal_get_cycles(); 304 if( CONFIG_DEBUG_RPC_SERVER< cycle )301 #if DEBUG_RPC_SERVER_GENERIC 302 cycle = (uint32_t)hal_get_cycles(); 303 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 305 304 printk("\n[DBG] %s : interrupted thread %x deschedules in cluster %x / cycle %d\n", 306 305 __FUNCTION__, this, local_cxy, cycle ); 307 306 #endif 308 307 309 // interrupted thread deschedule always308 // interrupted thread always deschedule 310 309 sched_yield("IPI received"); 311 310 312 #if CONFIG_DEBUG_RPC_SERVER313 cycle = (uint32_t)hal_get_cycles(); 314 if( CONFIG_DEBUG_RPC_SERVER< cycle )311 #if DEBUG_RPC_SERVER_GENERIC 312 cycle = (uint32_t)hal_get_cycles(); 313 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 315 314 printk("\n[DBG] %s : interrupted thread %x resumes in cluster %x / cycle %d\n", 316 315 __FUNCTION__, this, local_cxy, cycle ); … … 346 345 // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests 347 346 348 while(1) // externalloop347 while(1) // infinite loop 349 348 { 350 349 // try to take RPC_FIFO ownership … … 352 351 { 353 352 354 #if CONFIG_DEBUG_RPC_SERVER353 #if DEBUG_RPC_SERVER_GENERIC 355 354 uint32_t cycle = (uint32_t)hal_get_cycles(); 356 if( CONFIG_DEBUG_RPC_SERVER< cycle )355 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 357 356 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n", 358 357 __FUNCTION__, this, local_cxy, cycle ); … … 360 359 // initializes RPC requests counter 361 360 count = 0; 362 363 // acknowledge local IPI364 dev_pic_ack_ipi();365 361 366 362 // exit internal loop in three cases: … … 381 377 blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) ); 382 378 383 #if CONFIG_DEBUG_RPC_SERVER384 cycle = (uint32_t)hal_get_cycles(); 385 if( CONFIG_DEBUG_RPC_SERVER< cycle )386 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_ ptr %x / cycle %d\n",387 __FUNCTION__, this, local_cxy, index, desc_ ptr, cycle);379 #if DEBUG_RPC_SERVER_GENERIC 380 cycle = (uint32_t)hal_get_cycles(); 381 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 382 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n", 383 __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr ); 388 384 #endif 389 385 // call the relevant server function 390 386 rpc_server[index]( desc_xp ); 391 387 392 #if CONFIG_DEBUG_RPC_SERVER393 cycle = (uint32_t)hal_get_cycles(); 394 if( 
CONFIG_DEBUG_RPC_SERVER< cycle )388 #if DEBUG_RPC_SERVER_GENERIC 389 cycle = (uint32_t)hal_get_cycles(); 390 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 395 391 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n", 396 __FUNCTION__, this, local_cxy, index, cycle );392 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle ); 397 393 #endif 398 394 // increment handled RPCs counter … … 403 399 { 404 400 // decrement responses counter in RPC descriptor 405 hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1); 401 hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 ); 402 403 // get client thread pointer and client core lid from RPC descriptor 404 thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 405 core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 406 406 407 407 // unblock client thread 408 thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread)); 409 thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC ); 408 thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC ); 410 409 411 410 hal_fence(); 412 411 413 // get client core lid and send IPI 414 core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid)); 412 #if DEBUG_RPC_SERVER_GENERIC 413 cycle = (uint32_t)hal_get_cycles(); 414 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 415 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n", 416 __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle ); 417 #endif 418 // send IPI to client core 415 419 dev_pic_send_ipi( desc_cxy , core_lid ); 416 420 } … … 432 436 { 433 437 434 #if CONFIG_DEBUG_RPC_SERVER438 #if DEBUG_RPC_SERVER_GENERIC 435 439 uint32_t cycle = (uint32_t)hal_get_cycles(); 436 if( CONFIG_DEBUG_RPC_SERVER< cycle )440 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 437 441 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n", 438 442 __FUNCTION__, this, local_cxy, cycle ); … … 447 451 } 448 452 449 #if CONFIG_DEBUG_RPC_SERVER453 #if DEBUG_RPC_SERVER_GENERIC 450 454 uint32_t cycle = (uint32_t)hal_get_cycles(); 451 if( CONFIG_DEBUG_RPC_SERVER< cycle )455 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 452 456 printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n", 453 457 __FUNCTION__, this, local_cxy, cycle ); 454 458 #endif 455 459 456 // deschedule without blocking 460 // Block and deschedule 461 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE ); 457 462 sched_yield("RPC fifo empty or too much work"); 458 463 459 #if CONFIG_DEBUG_RPC_SERVER460 cycle = (uint32_t)hal_get_cycles(); 461 if( CONFIG_DEBUG_RPC_SERVER< cycle )464 #if DEBUG_RPC_SERVER_GENERIC 465 cycle = (uint32_t)hal_get_cycles(); 466 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 462 467 printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n", 463 468 __FUNCTION__, this, local_cxy, cycle ); 464 469 #endif 465 470 466 } // end externalloop471 } // end infinite loop 467 472 468 473 } // end rpc_thread_func() … … 478 483 page_t ** page ) // out 479 484 { 480 481 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 482 483 // initialise RPC descriptor header 484 rpc_desc_t rpc; 485 rpc.index = RPC_PMEM_GET_PAGES; 486 rpc.response = 1; 487 rpc.blocking = true; 485 #if DEBUG_RPC_PMEM_GET_PAGES 486 uint32_t cycle = (uint32_t)hal_get_cycles(); 487 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 488 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 489 __FUNCTION__ , CURRENT_THREAD , local_cxy, 
CURRENT_THREAD->core->lid , cycle ); 490 #endif 491 492 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 493 494 // initialise RPC descriptor header 495 rpc_desc_t rpc; 496 rpc.index = RPC_PMEM_GET_PAGES; 497 rpc.blocking = true; 498 rpc.responses = 1; 488 499 489 500 // set input arguments in RPC descriptor … … 496 507 *page = (page_t *)(intptr_t)rpc.args[1]; 497 508 509 #if DEBUG_RPC_PMEM_GET_PAGES 510 cycle = (uint32_t)hal_get_cycles(); 511 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 512 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 513 __FUNCTION__ , CURRENT_THREAD , cycle ); 514 #endif 498 515 } 499 516 … … 501 518 void rpc_pmem_get_pages_server( xptr_t xp ) 502 519 { 520 #if DEBUG_RPC_PMEM_GET_PAGES 521 uint32_t cycle = (uint32_t)hal_get_cycles(); 522 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 523 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 524 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 525 #endif 503 526 504 527 // get client cluster identifier and pointer on RPC descriptor … … 515 538 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 516 539 540 #if DEBUG_RPC_PMEM_GET_PAGES 541 cycle = (uint32_t)hal_get_cycles(); 542 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 543 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 544 __FUNCTION__ , CURRENT_THREAD , cycle ); 545 #endif 517 546 } 518 547 … … 525 554 page_t * page ) // out 526 555 { 556 #if DEBUG_RPC_PMEM_RELEASE_PAGES 557 uint32_t cycle = (uint32_t)hal_get_cycles(); 558 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 559 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 560 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 561 #endif 527 562 528 563 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 531 566 rpc_desc_t rpc; 532 567 rpc.index = RPC_PMEM_RELEASE_PAGES; 533 rpc. response = 1;534 rpc. 
blocking = true;568 rpc.blocking = true; 569 rpc.responses = 1; 535 570 536 571 // set input arguments in RPC descriptor … … 540 575 rpc_send( cxy , &rpc ); 541 576 577 #if DEBUG_RPC_PMEM_RELEASE_PAGES 578 cycle = (uint32_t)hal_get_cycles(); 579 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 580 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 581 __FUNCTION__ , CURRENT_THREAD , cycle ); 582 #endif 542 583 } 543 584 … … 545 586 void rpc_pmem_release_pages_server( xptr_t xp ) 546 587 { 588 #if DEBUG_RPC_PMEM_RELEASE_PAGES 589 uint32_t cycle = (uint32_t)hal_get_cycles(); 590 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 591 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 592 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 593 #endif 547 594 548 595 // get client cluster identifier and pointer on RPC descriptor … … 559 606 kmem_free( &req ); 560 607 608 #if DEBUG_RPC_PMEM_RELEASE_PAGES 609 cycle = (uint32_t)hal_get_cycles(); 610 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 611 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 612 __FUNCTION__ , CURRENT_THREAD , cycle ); 613 #endif 561 614 } 562 615 … … 577 630 error_t * error ) // out 578 631 { 632 #if DEBUG_RPC_PROCESS_MAKE_FORK 633 uint32_t cycle = (uint32_t)hal_get_cycles(); 634 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 635 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 636 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 637 #endif 638 579 639 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 580 640 … … 582 642 rpc_desc_t rpc; 583 643 rpc.index = RPC_PROCESS_MAKE_FORK; 584 rpc. response = 1;585 rpc. blocking = true;644 rpc.blocking = true; 645 rpc.responses = 1; 586 646 587 647 // set input arguments in RPC descriptor … … 597 657 *error = (error_t)rpc.args[4]; 598 658 659 #if DEBUG_RPC_PROCESS_MAKE_FORK 660 cycle = (uint32_t)hal_get_cycles(); 661 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 662 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 663 __FUNCTION__ , CURRENT_THREAD , cycle ); 664 #endif 599 665 } 600 666 … … 602 668 void rpc_process_make_fork_server( xptr_t xp ) 603 669 { 670 #if DEBUG_RPC_PROCESS_MAKE_FORK 671 uint32_t cycle = (uint32_t)hal_get_cycles(); 672 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 673 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 674 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 675 #endif 604 676 605 677 xptr_t ref_process_xp; // extended pointer on reference parent process … … 628 700 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 629 701 702 #if DEBUG_RPC_PROCESS_MAKE_FORK 703 cycle = (uint32_t)hal_get_cycles(); 704 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 705 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 706 __FUNCTION__ , CURRENT_THREAD , cycle ); 707 #endif 630 708 } 631 709 … … 656 734 rpc_desc_t rpc; 657 735 rpc.index = RPC_THREAD_USER_CREATE; 658 rpc. response = 1;659 rpc. blocking = true;736 rpc.blocking = true; 737 rpc.responses = 1; 660 738 661 739 // set input arguments in RPC descriptor … … 690 768 // get client cluster identifier and pointer on RPC descriptor 691 769 cxy_t client_cxy = GET_CXY( xp ); 692 rpc_desc_t * desc = GET_PTR( xp );770 rpc_desc_t * desc = GET_PTR( xp ); 693 771 694 772 // get pointer on attributes structure in client cluster from RPC descriptor … … 736 814 rpc_desc_t rpc; 737 815 rpc.index = RPC_THREAD_KERNEL_CREATE; 738 rpc. response = 1;739 rpc. 
blocking = true;816 rpc.blocking = true; 817 rpc.responses = 1; 740 818 741 819 // set input arguments in RPC descriptor … … 763 841 // get client cluster identifier and pointer on RPC descriptor 764 842 cxy_t client_cxy = GET_CXY( xp ); 765 rpc_desc_t * desc = GET_PTR( xp );843 rpc_desc_t * desc = GET_PTR( xp ); 766 844 767 845 // get attributes from RPC descriptor … … 797 875 { 798 876 799 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)877 #if DEBUG_RPC_PROCESS_SIGACTION 800 878 uint32_t cycle = (uint32_t)hal_get_cycles(); 801 879 uint32_t action = rpc->args[0]; 802 880 pid_t pid = rpc->args[1]; 803 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )881 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 804 882 printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n", 805 883 __FUNCTION__ , process_action_str( action ) , pid , cxy , cycle ); … … 813 891 rpc_send( cxy , rpc ); 814 892 815 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)816 cycle = (uint32_t)hal_get_cycles(); 817 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )893 #if DEBUG_RPC_PROCESS_SIGACTION 894 cycle = (uint32_t)hal_get_cycles(); 895 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 818 896 printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n", 819 897 __FUNCTION__ , process_action_str( action ) , pid , cxy , cycle ); … … 842 920 pid = (pid_t) hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) ); 843 921 844 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)922 #if DEBUG_RPC_PROCESS_SIGACTION 845 923 uint32_t cycle = (uint32_t)hal_get_cycles(); 846 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )924 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 847 925 printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n", 848 926 __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle ); … … 858 936 859 937 // build extended pointer on response counter in RPC 860 count_xp = XPTR( client_cxy , &rpc->response );938 count_xp = XPTR( client_cxy , &rpc->responses ); 861 939 862 940 // decrement the responses counter in RPC descriptor, … … 872 950 } 873 951 874 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)875 cycle = (uint32_t)hal_get_cycles(); 876 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )952 #if DEBUG_RPC_PROCESS_SIGACTION 953 cycle = (uint32_t)hal_get_cycles(); 954 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 877 955 printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n", 878 956 __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle ); … … 903 981 rpc_desc_t rpc; 904 982 rpc.index = RPC_VFS_INODE_CREATE; 905 rpc. response = 1;906 rpc. blocking = true;983 rpc.blocking = true; 984 rpc.responses = 1; 907 985 908 986 // set input arguments in RPC descriptor … … 983 1061 rpc_desc_t rpc; 984 1062 rpc.index = RPC_VFS_INODE_DESTROY; 985 rpc. response = 1;986 rpc. blocking = true;1063 rpc.blocking = true; 1064 rpc.responses = 1; 987 1065 988 1066 // set input arguments in RPC descriptor … … 1023 1101 error_t * error ) // out 1024 1102 { 1025 RPC_DEBUG_ENTER 1103 #if DEBUG_RPC_VFS_DENTRY_CREATE 1104 uint32_t cycle = (uint32_t)hal_get_cycles(); 1105 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1106 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1107 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1108 #endif 1026 1109 1027 1110 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 1030 1113 rpc_desc_t rpc; 1031 1114 rpc.index = RPC_VFS_DENTRY_CREATE; 1032 rpc. response = 1;1033 rpc. 
blocking = true;1115 rpc.blocking = true; 1116 rpc.responses = 1; 1034 1117 1035 1118 // set input arguments in RPC descriptor … … 1045 1128 *error = (error_t)rpc.args[4]; 1046 1129 1047 RPC_DEBUG_EXIT 1130 #if DEBUG_RPC_VFS_DENTRY_CREATE 1131 cycle = (uint32_t)hal_get_cycles(); 1132 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1133 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1134 __FUNCTION__ , CURRENT_THREAD , cycle ); 1135 #endif 1048 1136 } 1049 1137 … … 1051 1139 void rpc_vfs_dentry_create_server( xptr_t xp ) 1052 1140 { 1141 #if DEBUG_RPC_VFS_DENTRY_CREATE 1142 uint32_t cycle = (uint32_t)hal_get_cycles(); 1143 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1144 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1145 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1146 #endif 1147 1053 1148 uint32_t type; 1054 1149 char * name; … … 1056 1151 xptr_t dentry_xp; 1057 1152 error_t error; 1058 1059 RPC_DEBUG_ENTER1060 1061 1153 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1062 1154 … … 1083 1175 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1084 1176 1085 RPC_DEBUG_EXIT 1177 #if DEBUG_RPC_VFS_DENTRY_CREATE 1178 cycle = (uint32_t)hal_get_cycles(); 1179 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1180 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1181 __FUNCTION__ , CURRENT_THREAD , cycle ); 1182 #endif 1086 1183 } 1087 1184 … … 1100 1197 rpc_desc_t rpc; 1101 1198 rpc.index = RPC_VFS_DENTRY_DESTROY; 1102 rpc. response = 1;1103 rpc. blocking = true;1199 rpc.blocking = true; 1200 rpc.responses = 1; 1104 1201 1105 1202 // set input arguments in RPC descriptor … … 1140 1237 error_t * error ) // out 1141 1238 { 1239 #if DEBUG_RPC_VFS_FILE_CREATE 1240 uint32_t cycle = (uint32_t)hal_get_cycles(); 1241 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1242 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1243 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1244 #endif 1245 1142 1246 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1143 1247 … … 1145 1249 rpc_desc_t rpc; 1146 1250 rpc.index = RPC_VFS_FILE_CREATE; 1147 rpc. response = 1;1148 rpc. blocking = true;1251 rpc.blocking = true; 1252 rpc.responses = 1; 1149 1253 1150 1254 // set input arguments in RPC descriptor … … 1159 1263 *error = (error_t)rpc.args[3]; 1160 1264 1265 #if DEBUG_RPC_VFS_FILE_CREATE 1266 cycle = (uint32_t)hal_get_cycles(); 1267 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1268 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1269 __FUNCTION__ , CURRENT_THREAD , cycle ); 1270 #endif 1161 1271 } 1162 1272 … … 1164 1274 void rpc_vfs_file_create_server( xptr_t xp ) 1165 1275 { 1276 #if DEBUG_RPC_VFS_FILE_CREATE 1277 uint32_t cycle = (uint32_t)hal_get_cycles(); 1278 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1279 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1280 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1281 #endif 1282 1166 1283 uint32_t file_attr; 1167 1284 vfs_inode_t * inode; … … 1186 1303 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1187 1304 1305 #if DEBUG_RPC_VFS_FILE_CREATE 1306 cycle = (uint32_t)hal_get_cycles(); 1307 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1308 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1309 __FUNCTION__ , CURRENT_THREAD , cycle ); 1310 #endif 1188 1311 } 1189 1312 … … 1201 1324 rpc_desc_t rpc; 1202 1325 rpc.index = RPC_VFS_FILE_DESTROY; 1203 rpc. response = 1;1204 rpc. 
blocking = true;1326 rpc.blocking = true; 1327 rpc.responses = 1; 1205 1328 1206 1329 // set input arguments in RPC descriptor … … 1245 1368 rpc_desc_t rpc; 1246 1369 rpc.index = RPC_VFS_INODE_LOAD; 1247 rpc. response = 1;1248 rpc. blocking = true;1370 rpc.blocking = true; 1371 rpc.responses = 1; 1249 1372 1250 1373 // set input arguments in RPC descriptor … … 1306 1429 rpc_desc_t rpc; 1307 1430 rpc.index = RPC_VFS_MAPPER_LOAD_ALL; 1308 rpc. response = 1;1309 rpc. blocking = true;1431 rpc.blocking = true; 1432 rpc.responses = 1; 1310 1433 1311 1434 // set input arguments in RPC descriptor … … 1358 1481 rpc_desc_t rpc; 1359 1482 rpc.index = RPC_FATFS_GET_CLUSTER; 1360 rpc. response = 1;1361 rpc. blocking = true;1483 rpc.blocking = true; 1484 rpc.responses = 1; 1362 1485 1363 1486 // set input arguments in RPC descriptor … … 1386 1509 // get client cluster identifier and pointer on RPC descriptor 1387 1510 cxy_t client_cxy = GET_CXY( xp ); 1388 rpc_desc_t * desc = GET_PTR( xp );1511 rpc_desc_t * desc = GET_PTR( xp ); 1389 1512 1390 1513 // get input arguments … … 1418 1541 rpc_desc_t rpc; 1419 1542 rpc.index = RPC_VMM_GET_VSEG; 1420 rpc. response = 1;1421 rpc. blocking = true;1543 rpc.blocking = true; 1544 rpc.responses = 1; 1422 1545 1423 1546 // set input arguments in RPC descriptor … … 1480 1603 rpc_desc_t rpc; 1481 1604 rpc.index = RPC_VMM_GET_PTE; 1482 rpc. response = 1;1483 rpc. blocking = true;1605 rpc.blocking = true; 1606 rpc.responses = 1; 1484 1607 1485 1608 // set input arguments in RPC descriptor … … 1541 1664 rpc_desc_t rpc; 1542 1665 rpc.index = RPC_THREAD_USER_CREATE; 1543 rpc. response = 1;1544 rpc. blocking = true;1666 rpc.blocking = true; 1667 rpc.responses = 1; 1545 1668 1546 1669 // set input arguments in RPC descriptor … … 1560 1683 // get client cluster identifier and pointer on RPC descriptor 1561 1684 cxy_t client_cxy = GET_CXY( xp ); 1562 rpc_desc_t * desc = GET_PTR( xp );1685 rpc_desc_t * desc = GET_PTR( xp ); 1563 1686 1564 1687 // get input argument "kmem_type" from client RPC descriptor … … 1591 1714 rpc_desc_t rpc; 1592 1715 rpc.index = RPC_THREAD_USER_CREATE; 1593 rpc. response = 1;1594 rpc. blocking = true;1716 rpc.blocking = true; 1717 rpc.responses = 1; 1595 1718 1596 1719 // set input arguments in RPC descriptor … … 1608 1731 // get client cluster identifier and pointer on RPC descriptor 1609 1732 cxy_t client_cxy = GET_CXY( xp ); 1610 rpc_desc_t * desc = GET_PTR( xp );1733 rpc_desc_t * desc = GET_PTR( xp ); 1611 1734 1612 1735 // get input arguments "buf" and "kmem_type" from client RPC descriptor … … 1641 1764 rpc_desc_t rpc; 1642 1765 rpc.index = RPC_MAPPER_MOVE_BUFFER; 1643 rpc. response = 1;1644 rpc. blocking = true;1766 rpc.blocking = true; 1767 rpc.responses = 1; 1645 1768 1646 1769 // set input arguments in RPC descriptor … … 1725 1848 rpc_desc_t rpc; 1726 1849 rpc.index = RPC_MAPPER_GET_PAGE; 1727 rpc. response = 1;1728 rpc. blocking = true;1850 rpc.blocking = true; 1851 rpc.responses = 1; 1729 1852 1730 1853 // set input arguments in RPC descriptor … … 1780 1903 rpc_desc_t rpc; 1781 1904 rpc.index = RPC_VMM_CREATE_VSEG; 1782 rpc. response = 1;1783 rpc. blocking = true;1905 rpc.blocking = true; 1906 rpc.responses = 1; 1784 1907 1785 1908 // set input arguments in RPC descriptor … … 1846 1969 rpc_desc_t rpc; 1847 1970 rpc.index = RPC_SCHED_DISPLAY; 1848 rpc. response = 1;1849 rpc. 
blocking = true;1971 rpc.blocking = true; 1972 rpc.responses = 1; 1850 1973 1851 1974 // set input arguments in RPC descriptor … … 1885 2008 rpc_desc_t rpc; 1886 2009 rpc.index = RPC_VMM_SET_COW; 1887 rpc. response = 1;1888 rpc. blocking = true;2010 rpc.blocking = true; 2011 rpc.responses = 1; 1889 2012 1890 2013 // set input arguments in RPC descriptor … … 1927 2050 rpc_desc_t rpc; 1928 2051 rpc.index = RPC_VMM_DISPLAY; 1929 rpc. response = 1;1930 rpc. blocking = true;2052 rpc.blocking = true; 2053 rpc.responses = 1; 1931 2054 1932 2055 // set input arguments in RPC descriptor -
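All the rpc.c hunks above share one tracing idiom: each DEBUG_* switch doubles as a cycle threshold, so a trace point stays silent until hal_get_cycles() passes the configured value. The following is a minimal stand-alone sketch of that idiom, not kernel code: printk and hal_get_cycles() are stubbed with hosted stand-ins, and the threshold value is arbitrary.

    #include <stdio.h>
    #include <stdint.h>

    #define DEBUG_RPC_SERVER_GENERIC 1000   /* 0 disables the trace; else cycle threshold */

    /* stand-in for the kernel cycle counter */
    static uint32_t hal_get_cycles( void ) { static uint32_t c = 0; return c += 600; }

    static void rpc_serve( uint32_t index )
    {
    #if DEBUG_RPC_SERVER_GENERIC
        uint32_t cycle = (uint32_t)hal_get_cycles();
        if( DEBUG_RPC_SERVER_GENERIC < cycle )
            printf("\n[DBG] %s : got rpc[%u] / cycle %u\n", __func__, index, cycle );
    #endif
        /* ... the real code would call rpc_server[index]( desc_xp ) here ... */
    }

    int main( void )
    {
        for( uint32_t i = 0; i < 4; i++ ) rpc_serve( i );  /* traces appear once cycle > 1000 */
        return 0;
    }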
trunk/kernel/kern/rpc.h
r437 r438
111 111 {
112 112     rpc_index_t       index;      /*! index of requested RPC service           */
113         volatile uint32_t response ;  /*! all responses received when 0            */
    113     volatile uint32_t responses;  /*! number of expected responses             */
114 114     struct thread_s * thread;     /*! local pointer on client thread           */
115 115     uint32_t          lid;        /*! index of core running the calling thread */
…
150 150
151 151 /***********************************************************************************
152      * This function is the entry point for RPC handling on the server side.
153      * It is executed by a core receiving an IPI, and each time the core enters,
154      * or exit the kernel to handle.
155      * It does nothing and return if the RPC_FIFO is empty.
156      * The calling thread checks if it exist at least one non-blocked RPC thread,
157      * creates a new RPC if required, and deschedule to allow the RPC thead to execute.
    152  * This function is the entry point for RPC handling on the server cluster.
    153  * It is executed by the core receiving the IPI sent by the client thread.
    154  * - If the RPC FIFO is empty, it deschedules.
    155  * - If the RPC FIFO is not empty, it checks whether a non-blocked RPC thread
    156  *   exists in the cluster, creates a new one if required, and deschedules to
    157  *   allow the RPC thread to execute.
158 158 **********************************************************************************/
159 159 void rpc_check();
-
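With the counter renamed from response to responses, every blocking client stub in this changeset initializes its descriptor the same way: index, then blocking = true, then responses = 1, before marshalling args[] and calling rpc_send(). Below is a condensed sketch of that client pattern; rpc_send() is a local stub (the real one posts the descriptor to the server cluster's RPC FIFO, sends an IPI, and blocks the caller on THREAD_BLOCKED_RPC), and example_client() is a hypothetical stand-in for stubs like rpc_pmem_get_pages_client().

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint32_t cxy_t;
    typedef uint32_t rpc_index_t;

    typedef struct rpc_desc_s
    {
        rpc_index_t        index;       /* index of requested RPC service        */
        volatile uint32_t  responses;   /* number of expected responses          */
        bool               blocking;    /* client blocks until completion        */
        uint64_t           args[10];    /* input/output arguments                */
    } rpc_desc_t;

    /* stub: pretends the remote server handled the request and answered */
    static void rpc_send( cxy_t cxy, rpc_desc_t * rpc )
    {
        (void)cxy;
        rpc->args[1]   = 0xbeef;        /* server-side output                    */
        rpc->responses = 0;             /* server decrements the counter         */
    }

    /* hypothetical client stub, shaped like the ones in this changeset */
    static void example_client( cxy_t cxy, uint32_t order, uint64_t * result )
    {
        rpc_desc_t rpc;
        rpc.index     = 0;              /* would be an RPC_* service index       */
        rpc.blocking  = true;
        rpc.responses = 1;

        rpc.args[0] = order;            /* input argument                        */
        rpc_send( cxy, &rpc );
        *result = rpc.args[1];          /* output argument written by the server */
    }

    int main( void ) { uint64_t r; example_client( 1, 2, &r ); return (int)(r == 0); }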
trunk/kernel/kern/scheduler.c
r437 r438 125 125 thread = LIST_ELEMENT( current , thread_t , sched_list ); 126 126 127 // analyse kernel thread type 128 switch( thread->type ) 127 // execute RPC thread if non blocked 128 if( (thread->blocked == 0) && 129 (thread->type == THREAD_RPC) ) 129 130 { 130 case THREAD_RPC: // if non blocked and RPC FIFO non-empty 131 if( (thread->blocked == 0) && 132 (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) ) 133 { 134 spinlock_unlock( &sched->lock ); 135 return thread; 136 } 137 break; 138 139 case THREAD_DEV: // if non blocked and waiting queue non empty 140 if( (thread->blocked == 0) && 141 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) 142 { 143 spinlock_unlock( &sched->lock ); 144 return thread; 145 } 146 break; 147 148 default: 149 break; 131 spinlock_unlock( &sched->lock ); 132 return thread; 133 } 134 135 // execute DEV thread if non blocked and waiting queue non empty 136 if( (thread->blocked == 0) && 137 (thread->type == THREAD_DEV) && 138 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) 139 { 140 spinlock_unlock( &sched->lock ); 141 return thread; 150 142 } 151 143 } // end loop on kernel threads … … 174 166 thread = LIST_ELEMENT( current , thread_t , sched_list ); 175 167 176 // return thread if runnable168 // return thread if non blocked 177 169 if( thread->blocked == 0 ) 178 170 { … … 227 219 process = thread->process; 228 220 229 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS221 #if DEBUG_SCHED_HANDLE_SIGNALS 230 222 uint32_t cycle = (uint32_t)hal_get_cycles(); 231 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )223 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 232 224 printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n", 233 225 __FUNCTION__ , thread , process->pid , cycle ); … … 250 242 thread_destroy( thread ); 251 243 252 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS244 #if DEBUG_SCHED_HANDLE_SIGNALS 253 245 cycle = (uint32_t)hal_get_cycles(); 254 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )246 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 255 247 printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n", 256 248 __FUNCTION__ , thread , process->pid , cycle ); … … 262 254 process_destroy( process ); 263 255 264 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS256 #if DEBUG_SCHED_HANDLE_SIGNALS 265 257 cycle = (uint32_t)hal_get_cycles(); 266 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )258 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 267 259 printk("\n[DBG] %s : process %x has been deleted / cycle %d\n", 268 260 __FUNCTION__ , process->pid , cycle ); … … 287 279 scheduler_t * sched = &core->scheduler; 288 280 289 #if ( CONFIG_DEBUG_SCHED_YIELD & 0x1)290 if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )281 #if (DEBUG_SCHED_YIELD & 0x1) 282 if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() ) 291 283 sched_display( core->lid ); 292 284 #endif … … 322 314 { 323 315 324 #if CONFIG_DEBUG_SCHED_YIELD316 #if DEBUG_SCHED_YIELD 325 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 326 if( CONFIG_DEBUG_SCHED_YIELD < cycle )318 if( DEBUG_SCHED_YIELD < cycle ) 327 319 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 328 320 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", … … 350 342 { 351 343 352 #if ( CONFIG_DEBUG_SCHED_YIELD & 1)344 #if (DEBUG_SCHED_YIELD & 1) 353 345 uint32_t cycle = (uint32_t)hal_get_cycles(); 354 if( CONFIG_DEBUG_SCHED_YIELD < cycle )346 if( DEBUG_SCHED_YIELD < cycle ) 355 347 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 356 348 " thread %x (%s) (%x,%x) continue / cycle %d\n", -
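The hunk above replaces the switch on thread->type with two flat tests and drops the RPC-FIFO emptiness check: an idle RPC thread now blocks itself on THREAD_BLOCKED_IDLE, so "unblocked" is by itself a sufficient eligibility test. A reduced sketch of the new selection logic, with list traversal, locking and the user/idle fallback simplified away:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { THREAD_RPC, THREAD_DEV, THREAD_USR } thread_type_t;

    typedef struct thread_s
    {
        thread_type_t type;
        unsigned      blocked;           /* 0 means runnable                                 */
        bool          dev_queue_empty;   /* stand-in for xlist_is_empty( &chdev->wait_root ) */
    } thread_t;

    /* reduced stand-in for the kernel-thread half of sched_select() */
    static thread_t * select_kernel_thread( thread_t * list, size_t n )
    {
        for( size_t i = 0; i < n; i++ )
        {
            thread_t * t = &list[i];

            /* execute RPC thread if non blocked */
            if( (t->blocked == 0) && (t->type == THREAD_RPC) ) return t;

            /* execute DEV thread if non blocked and waiting queue non empty */
            if( (t->blocked == 0) && (t->type == THREAD_DEV) && (t->dev_queue_empty == false) )
                return t;
        }
        return NULL;   /* caller falls through to user threads, then the idle thread */
    }

    int main( void )
    {
        thread_t list[2] = { { THREAD_RPC, 0x0080, true  },    /* blocked on BLOCKED_IDLE */
                             { THREAD_DEV, 0,      false } };  /* runnable, work pending  */
        return select_kernel_thread( list, 2 ) == &list[1] ? 0 : 1;
    }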
trunk/kernel/kern/thread.c
r436 r438 112 112 ///////////////////////////////////////////////////////////////////////////////////// 113 113 // This static function initializes a thread descriptor (kernel or user). 114 // It can be called by the threefunctions:114 // It can be called by the four functions: 115 115 // - thread_user_create() 116 116 // - thread_user_fork() 117 117 // - thread_kernel_create() 118 // - thread_idle_init() 119 // It updates the local DQDT. 118 120 ///////////////////////////////////////////////////////////////////////////////////// 119 121 // @ thread : pointer on thread descriptor … … 202 204 thread->save_sr = 0xFF13; 203 205 204 // update local DQDT205 dqdt_local_update_threads( 1 );206 207 206 // register new thread in core scheduler 208 207 sched_register_thread( thread->core , thread ); 208 209 // update DQDT 210 dqdt_update_threads( 1 ); 209 211 210 212 return 0; … … 227 229 assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" ); 228 230 229 #if CONFIG_DEBUG_THREAD_USER_CREATE231 #if DEBUG_THREAD_USER_CREATE 230 232 uint32_t cycle = (uint32_t)hal_get_cycles(); 231 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )233 if( DEBUG_THREAD_USER_CREATE < cycle ) 232 234 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 233 235 __FUNCTION__, CURRENT_THREAD, pid , cycle ); … … 326 328 } 327 329 328 // update DQDT for new thread 329 dqdt_local_update_threads( 1 ); 330 331 #if CONFIG_DEBUG_THREAD_USER_CREATE 330 #if DEBUG_THREAD_USER_CREATE 332 331 cycle = (uint32_t)hal_get_cycles(); 333 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )332 if( DEBUG_THREAD_USER_CREATE < cycle ) 334 333 printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n", 335 334 __FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle ); … … 366 365 vseg_t * vseg; // child thread STACK vseg 367 366 368 #if CONFIG_DEBUG_THREAD_USER_FORK367 #if DEBUG_THREAD_USER_FORK 369 368 uint32_t cycle = (uint32_t)hal_get_cycles(); 370 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )369 if( DEBUG_THREAD_USER_FORK < cycle ) 371 370 printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n", 372 371 __FUNCTION__, CURRENT_THREAD, child_process->pid, cycle ); … … 493 492 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 494 493 495 #if ( CONFIG_DEBUG_THREAD_USER_FORK & 1)494 #if (DEBUG_THREAD_USER_FORK & 1) 496 495 cycle = (uint32_t)hal_get_cycles(); 497 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )496 if( DEBUG_THREAD_USER_FORK < cycle ) 498 497 printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n", 499 498 __FUNCTION__, CURRENT_THREAD, vpn ); … … 508 507 vpn_size ); 509 508 510 // update DQDT for child thread 511 dqdt_local_update_threads( 1 ); 512 513 #if CONFIG_DEBUG_THREAD_USER_FORK 509 #if DEBUG_THREAD_USER_FORK 514 510 cycle = (uint32_t)hal_get_cycles(); 515 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )511 if( DEBUG_THREAD_USER_FORK < cycle ) 516 512 printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n", 517 513 __FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle ); … … 538 534 __FUNCTION__ , "illegal core_lid" ); 539 535 540 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE536 #if DEBUG_THREAD_KERNEL_CREATE 541 537 uint32_t cycle = (uint32_t)hal_get_cycles(); 542 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )538 if( DEBUG_THREAD_KERNEL_CREATE < cycle ) 543 539 printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n", 544 540 __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), 
cycle ); … … 568 564 hal_cpu_context_create( thread ); 569 565 570 // update DQDT for kernel thread 571 dqdt_local_update_threads( 1 ); 572 573 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE 566 #if DEBUG_THREAD_KERNEL_CREATE 574 567 cycle = (uint32_t)hal_get_cycles(); 575 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )568 if( DEBUG_THREAD_KERNEL_CREATE < cycle ) 576 569 printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n", 577 570 __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle ); … … 583 576 } // end thread_kernel_create() 584 577 585 ///////////////////////////////////////////////// //586 error_t thread_ kernel_init( thread_t * thread,587 588 589 590 578 ///////////////////////////////////////////////// 579 error_t thread_idle_init( thread_t * thread, 580 thread_type_t type, 581 void * func, 582 void * args, 583 lid_t core_lid ) 591 584 { 592 585 assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" ); … … 607 600 return error; 608 601 609 } // end thread_ kernel_init()602 } // end thread_idle_init() 610 603 611 604 /////////////////////////////////////////////////////////////////////////////////////// … … 620 613 core_t * core = thread->core; 621 614 622 #if CONFIG_DEBUG_THREAD_DESTROY615 #if DEBUG_THREAD_DESTROY 623 616 uint32_t cycle = (uint32_t)hal_get_cycles(); 624 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )617 if( DEBUG_THREAD_DESTROY < cycle ) 625 618 printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n", 626 619 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); … … 652 645 process_remove_thread( thread ); 653 646 654 // update localDQDT655 dqdt_ local_update_threads( -1 );647 // update DQDT 648 dqdt_update_threads( -1 ); 656 649 657 650 // invalidate thread descriptor … … 661 654 thread_release( thread ); 662 655 663 #if CONFIG_DEBUG_THREAD_DESTROY656 #if DEBUG_THREAD_DESTROY 664 657 cycle = (uint32_t)hal_get_cycles(); 665 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )658 if( DEBUG_THREAD_DESTROY < cycle ) 666 659 printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n", 667 660 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); … … 811 804 hal_fence(); 812 805 813 #if CONFIG_DEBUG_THREAD_BLOCK806 #if DEBUG_THREAD_BLOCK 814 807 uint32_t cycle = (uint32_t)hal_get_cycles(); 815 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )808 if( DEBUG_THREAD_BLOCK < cycle ) 816 809 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n", 817 810 __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle ); 818 811 #endif 819 812 820 #if ( CONFIG_DEBUG_THREAD_BLOCK & 1)821 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )813 #if (DEBUG_THREAD_BLOCK & 1) 814 if( DEBUG_THREAD_BLOCK < cycle ) 822 815 sched_display( ptr->core->lid ); 823 816 #endif … … 837 830 hal_fence(); 838 831 839 #if CONFIG_DEBUG_THREAD_BLOCK832 #if DEBUG_THREAD_BLOCK 840 833 uint32_t cycle = (uint32_t)hal_get_cycles(); 841 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )834 if( DEBUG_THREAD_BLOCK < cycle ) 842 835 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n", 843 836 __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle ); 844 837 #endif 845 838 846 #if ( CONFIG_DEBUG_THREAD_BLOCK & 1)847 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )839 #if (DEBUG_THREAD_BLOCK & 1) 840 if( DEBUG_THREAD_BLOCK < cycle ) 848 841 sched_display( ptr->core->lid ); 849 842 #endif … … 890 883 killer_xp = XPTR( local_cxy , killer_ptr ); 891 884 892 #if CONFIG_DEBUG_THREAD_KILL885 #if DEBUG_THREAD_KILL 893 886 uint32_t cycle 
= (uint32_t)hal_get_cycles; 894 if( CONFIG_DEBUG_THREAD_KILL < cycle )887 if( DEBUG_THREAD_KILL < cycle ) 895 888 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n", 896 889 __FUNCTION__, killer_ptr, target_ptr, cycle ); … … 989 982 else hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL ); 990 983 991 #if CONFIG_DEBUG_THREAD_KILL984 #if DEBUG_THREAD_KILL 992 985 cycle = (uint32_t)hal_get_cycles; 993 if( CONFIG_DEBUG_THREAD_KILL < cycle )986 if( DEBUG_THREAD_KILL < cycle ) 994 987 printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n", 995 988 __FUNCTION__, killer_ptr, target_ptr, cycle ); … … 1002 995 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1003 996 1004 #if CONFIG_DEBUG_THREAD_KILL997 #if DEBUG_THREAD_KILL 1005 998 cycle = (uint32_t)hal_get_cycles; 1006 if( CONFIG_DEBUG_THREAD_KILL < cycle )999 if( DEBUG_THREAD_KILL < cycle ) 1007 1000 printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n", 1008 1001 __FUNCTION__, killer_ptr, target_ptr, cycle ); … … 1024 1017 { 1025 1018 1026 #if CONFIG_DEBUG_THREAD_IDLE1019 #if DEBUG_THREAD_IDLE 1027 1020 uint32_t cycle = (uint32_t)hal_get_cycles; 1028 1021 thread_t * this = CURRENT_THREAD; 1029 if( CONFIG_DEBUG_THREAD_IDLE < cycle )1022 if( DEBUG_THREAD_IDLE < cycle ) 1030 1023 printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n", 1031 1024 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); … … 1034 1027 hal_core_sleep(); 1035 1028 1036 #if CONFIG_DEBUG_THREAD_IDLE1029 #if DEBUG_THREAD_IDLE 1037 1030 cycle = (uint32_t)hal_get_cycles; 1038 if( CONFIG_DEBUG_THREAD_IDLE < cycle )1031 if( DEBUG_THREAD_IDLE < cycle ) 1039 1032 printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n", 1040 1033 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); -
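Besides the rename to thread_idle_init(), the structural change in thread.c is that dqdt_update_threads(+1) now happens once inside thread_init(), matched by the -1 in thread_destroy(), instead of each creation path calling dqdt_local_update_threads() itself. A sketch of that centralized-counter shape; the function bodies are stand-ins, only the names mirror the kernel's:

    #include <stdio.h>

    static int dqdt_threads = 0;                        /* stand-in for the DQDT thread counter */
    static void dqdt_update_threads( int delta ) { dqdt_threads += delta; }

    /* all four creation paths (user create, user fork, kernel create, idle init)
       funnel through thread_init(), so the +1 now lives in exactly one place     */
    static int  thread_init( void )    { dqdt_update_threads( 1 );  return 0; }
    static void thread_destroy( void ) { dqdt_update_threads( -1 ); }

    int main( void )
    {
        thread_init();
        thread_init();
        thread_destroy();
        printf("threads accounted in the DQDT: %d\n", dqdt_threads);   /* prints 1 */
        return 0;
    }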
trunk/kernel/kern/thread.h
r437 r438
 87  87 #define THREAD_BLOCKED_SEM      0x0020 /*! thread wait semaphore              */
 88  88 #define THREAD_BLOCKED_PAGE     0x0040 /*! thread wait page access            */
     89 #define THREAD_BLOCKED_IDLE     0x0080 /*! thread RPC wait RPC_FIFO non empty */
 89  90 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! thread wait (cond/mutex/barrier)   */
 90  91 #define THREAD_BLOCKED_RPC      0x0200 /*! thread wait RPC completion         */
…
286 287
287 288 /***************************************************************************************
288      * This function initializes an existing thread descriptor from arguments values.
    289  * This function is called by the kernel_init() function to initialize the IDLE thread.
    290  * It initializes an existing thread descriptor from arguments values.
289 291  * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
290      * It is called by the kernel_init() function to initialize the IDLE thread.
291 292  ***************************************************************************************
292 293  * @ thread    : pointer on existing thread descriptor.
…
297 298  * @ returns 0 if success / returns EINVAL if error
298 299  **************************************************************************************/
299      error_t thread_kernel_init( thread_t * thread,
300 301 302 303
    300  error_t thread_idle_init( thread_t * thread,
    301                            thread_type_t type,
    302                            void        * func,
    303                            void        * args,
    304                            lid_t         core_lid );
304 305
305 306 /***************************************************************************************
-
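THREAD_BLOCKED_IDLE takes the previously unused 0x0080 slot between PAGE and USERSYNC. Since blocked is a bit vector, several causes can be pending at once and the thread becomes runnable only when the whole word returns to zero. A minimal sketch of the set/clear/test idiom (the kernel uses atomic or/and operations, elided here):

    #include <assert.h>
    #include <stdint.h>

    #define THREAD_BLOCKED_PAGE 0x0040
    #define THREAD_BLOCKED_IDLE 0x0080   /* the new cause introduced by this changeset */
    #define THREAD_BLOCKED_RPC  0x0200

    int main( void )
    {
        uint32_t blocked = 0;

        blocked |= THREAD_BLOCKED_IDLE;        /* thread_block( ..., THREAD_BLOCKED_IDLE ) */
        blocked |= THREAD_BLOCKED_RPC;         /* a second pending cause                   */
        assert( blocked != 0 );                /* scheduler skips the thread               */

        blocked &= ~THREAD_BLOCKED_IDLE;       /* thread_unblock() clears one bit          */
        assert( blocked != 0 );                /* still blocked: RPC bit remains           */

        blocked &= ~THREAD_BLOCKED_RPC;
        assert( blocked == 0 );                /* runnable again                           */
        return 0;
    }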
trunk/kernel/libk/elf.c
r433 r438 201 201 vfs_file_count_up( file_xp ); 202 202 203 #if CONFIG_DEBUG_ELF_LOAD203 #if DEBUG_ELF_LOAD 204 204 uint32_t cycle = (uint32_t)hal_get_cycles(); 205 if( CONFIG_DEBUG_ELF_LOAD < cycle )205 if( DEBUG_ELF_LOAD < cycle ) 206 206 printk("\n[DBG] %s : found %s vseg / base %x / size %x\n" 207 207 " file_size %x / file_offset %x / mapper_xp %l / cycle %d\n", … … 228 228 error_t error; 229 229 230 #if CONFIG_DEBUG_ELF_LOAD230 #if DEBUG_ELF_LOAD 231 231 uint32_t cycle = (uint32_t)hal_get_cycles(); 232 if( CONFIG_DEBUG_ELF_LOAD < cycle )232 if( DEBUG_ELF_LOAD < cycle ) 233 233 printk("\n[DBG] %s : thread %d enter for <%s> / cycle %d\n", 234 234 __FUNCTION__, CURRENT_THREAD, pathname, cycle ); … … 252 252 } 253 253 254 #if ( CONFIG_DEBUG_ELF_LOAD & 1)255 if( CONFIG_DEBUG_ELF_LOAD < cycle )254 #if (DEBUG_ELF_LOAD & 1) 255 if( DEBUG_ELF_LOAD < cycle ) 256 256 printk("\n[DBG] %s : open file <%s>\n", __FUNCTION__, pathname ); 257 257 #endif … … 268 268 } 269 269 270 #if ( CONFIG_DEBUG_ELF_LOAD & 1)271 if( CONFIG_DEBUG_ELF_LOAD < cycle )270 #if (DEBUG_ELF_LOAD & 1) 271 if( DEBUG_ELF_LOAD < cycle ) 272 272 printk("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname ); 273 273 #endif … … 308 308 } 309 309 310 #if ( CONFIG_DEBUG_ELF_LOAD & 1)311 if( CONFIG_DEBUG_ELF_LOAD < cycle )310 #if (DEBUG_ELF_LOAD & 1) 311 if( DEBUG_ELF_LOAD < cycle ) 312 312 printk("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname ); 313 313 #endif … … 328 328 } 329 329 330 #if ( CONFIG_DEBUG_ELF_LOAD & 1)331 if( CONFIG_DEBUG_ELF_LOAD < cycle )330 #if (DEBUG_ELF_LOAD & 1) 331 if( DEBUG_ELF_LOAD < cycle ) 332 332 printk("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname ); 333 333 #endif … … 356 356 kmem_free(&req); 357 357 358 #if CONFIG_DEBUG_ELF_LOAD358 #if DEBUG_ELF_LOAD 359 359 cycle = (uint32_t)hal_get_cycles(); 360 if( CONFIG_DEBUG_ELF_LOAD < cycle )360 if( DEBUG_ELF_LOAD < cycle ) 361 361 printk("\n[DBG] %s : thread %d exit for <%s> / entry_point %x / cycle %d\n", 362 362 __FUNCTION__, CURRENT_THREAD, pathname, header.e_entry, cycle ); -
trunk/kernel/libk/remote_rwlock.c
r436 r438 41 41 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count ) , 0 ); 42 42 43 #if CONFIG_DEBUG_REMOTE_RWLOCKS43 #if DEBUG_REMOTE_RWLOCKS 44 44 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 45 45 xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) ); … … 86 86 thread_ptr->remote_locks++; 87 87 88 #if CONFIG_DEBUG_REMOTE_RWLOCKS88 #if DEBUG_REMOTE_RWLOCKS 89 89 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 90 90 XPTR( lock_cxy , &lock_ptr->list ) ); … … 126 126 thread_ptr->remote_locks--; 127 127 128 #if CONFIG_DEBUG_REMOTE_RWLOCKS128 #if DEBUG_REMOTE_RWLOCKS 129 129 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 130 130 #endif … … 176 176 } 177 177 178 #if CONFIG_DEBUG_REMOTE_RWLOCKS178 #if DEBUG_REMOTE_RWLOCKS 179 179 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 180 180 XPTR( local_cxy , thread_ptr ) ); -
trunk/kernel/libk/remote_rwlock.h
r436 r438
48 48     uint32_t count;          /*! current number of reader threads */
49 49
50        #if CONFIG_DEBUG_REMOTE_RWLOCKS
   50     #if DEBUG_REMOTE_RWLOCKS
51 51     xptr_t owner;            /*! extended pointer on writer thread */
52 52     xlist_entry_t list;      /*! member of list of remote locks taken by owner */
-
trunk/kernel/libk/remote_spinlock.c
r436 r438 38 38 hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 ); 39 39 40 #if CONFIG_DEBUG_REMOTE_SPINLOCKS40 #if DEBUG_REMOTE_SPINLOCKS 41 41 hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL ); 42 42 xlist_entry_init( XPTR( cxy , &ptr->list ) ); … … 75 75 thread_ptr->remote_locks++; 76 76 77 #if CONFIG_DEBUG_REMOTE_SPINLOCKS77 #if DEBUG_REMOTE_SPINLOCKS 78 78 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 79 79 XPTR( local_cxy , thread_ptr) ); … … 120 120 thread_ptr->remote_locks++; 121 121 122 #if CONFIG_DEBUG_REMOTE_SPINLOCKS122 #if DEBUG_REMOTE_SPINLOCKS 123 123 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 124 124 XPTR( local_cxy , thread_ptr) ); … … 143 143 thread_t * thread_ptr = CURRENT_THREAD; 144 144 145 #if CONFIG_DEBUG_REMOTE_SPINLOCKS145 #if DEBUG_REMOTE_SPINLOCKS 146 146 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 147 147 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); … … 196 196 thread_ptr->remote_locks++; 197 197 198 #if CONFIG_DEBUG_REMOTE_SPINLOCKS198 #if DEBUG_REMOTE_SPINLOCKS 199 199 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ), 200 200 XPTR( local_cxy , thread_ptr) ); … … 222 222 thread_t * thread_ptr = CURRENT_THREAD; 223 223 224 #if CONFIG_DEBUG_REMOTE_SPINLOCKS224 #if DEBUG_REMOTE_SPINLOCKS 225 225 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 226 226 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); -
trunk/kernel/libk/remote_spinlock.h
r436 r438
41 41     volatile uint32_t taken; /*! free if 0 / taken if non zero */
42 42
43        #if CONFIG_DEBUG_REMOTE_SPINLOCKS
   43     #if DEBUG_REMOTE_SPINLOCKS
44 44     xptr_t owner;            /*! extended pointer on the owner thread */
45 45     xlist_entry_t list;      /*! list of all remote_lock taken by owner */
-
trunk/kernel/libk/rwlock.c
r436 r438 38 38 lock->count = 0; 39 39 40 #if CONFIG_DEBUG_RWLOCKS40 #if DEBUG_RWLOCKS 41 41 lock->owner = NULL; 42 42 list_entry_init( &lock->list ); … … 70 70 this->local_locks++; 71 71 72 #if CONFIG_DEBUG_RWLOCKS72 #if DEBUG_RWLOCKS 73 73 list_add_first( &this->locks_root , &lock->list ); 74 74 #endif … … 98 98 this->local_locks--; 99 99 100 #if CONFIG_DEBUG_RWLOCKS100 #if DEBUG_RWLOCKS 101 101 list_unlink( &lock->list ); 102 102 #endif … … 138 138 this->local_locks++; 139 139 140 #if CONFIG_DEBUG_RWLOCKS140 #if DEBUG_RWLOCKS 141 141 lock->owner = this; 142 142 list_add_first( &this->locks_root , &lock->list ); … … 157 157 hal_disable_irq( &mode ); 158 158 159 #if CONFIG_DEBUG_RWLOCKS159 #if DEBUG_RWLOCKS 160 160 lock->owner = NULL; 161 161 list_unlink( &lock->list ); -
trunk/kernel/libk/rwlock.h
r436 r438
59 59     uint32_t count;          /*! number of simultaneous readers threads */
60 60
61        #if CONFIG_DEBUG_RWLOCKS
   61     #if DEBUG_RWLOCKS
62 62     struct thread_s * owner; /*! pointer on curent writer thread */
63 63     list_entry_t list;       /*! member of list of locks taken by owner */
-
trunk/kernel/libk/spinlock.c
r436 r438 38 38 lock->taken = 0; 39 39 40 #if CONFIG_DEBUG_SPINLOCKS40 #if DEBUG_SPINLOCKS 41 41 lock->owner = NULL; 42 42 list_entry_init( &lock->list ); … … 71 71 this->local_locks++; 72 72 73 #if CONFIG_DEBUG_SPINLOCKS73 #if DEBUG_SPINLOCKS 74 74 lock->owner = this; 75 75 list_add_first( &this->locks_root , &lock->list ); … … 86 86 thread_t * this = CURRENT_THREAD;; 87 87 88 #if CONFIG_DEBUG_SPINLOCKS88 #if DEBUG_SPINLOCKS 89 89 lock->owner = NULL; 90 90 list_unlink( &lock->list ); … … 132 132 this->local_locks++; 133 133 134 #if CONFIG_DEBUG_SPINLOCKS134 #if DEBUG_SPINLOCKS 135 135 lock->owner = this; 136 136 list_add_first( &this->locks_root , &lock->list ); … … 162 162 this->local_locks++; 163 163 164 #if CONFIG_DEBUG_SPINLOCKS164 #if DEBUG_SPINLOCKS 165 165 lock->owner = this; 166 166 list_add_first( &this->locks_root , &lock->list ); … … 177 177 thread_t * this = CURRENT_THREAD; 178 178 179 #if CONFIG_DEBUG_SPINLOCKS179 #if DEBUG_SPINLOCKS 180 180 lock->owner = NULL; 181 181 list_unlink( &lock->list ); -
trunk/kernel/libk/spinlock.h
r436 r438
62 62     uint32_t taken;          /*! state : free if zero / taken if non zero */
63 63
64        #if CONFIG_DEBUG_SPINLOCKS
   64     #if DEBUG_SPINLOCKS
65 65     struct thread_s * owner; /*! pointer on curent owner thread */
66 66     list_entry_t list;       /*! member of list of locks taken by owner */
-
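All six lock flavours touched by this changeset share the shape shown above: the owner pointer and the per-thread list linkage are compiled in only when the matching DEBUG_* switch is non-zero, so a release build carries just the taken word. A sketch of the conditional-field pattern, assuming a reduced list_entry_t:

    #include <stddef.h>

    #define DEBUG_SPINLOCKS 1    /* set to 0 to compile the lean release variant */

    typedef struct list_entry_s { struct list_entry_s * next, * pred; } list_entry_t;
    struct thread_s;             /* opaque here */

    typedef struct spinlock_s
    {
        unsigned taken;                 /* state : free if zero / taken if non zero */
    #if DEBUG_SPINLOCKS
        struct thread_s * owner;        /* current owner thread                     */
        list_entry_t      list;         /* member of list of locks taken by owner   */
    #endif
    } spinlock_t;

    int main( void )
    {
        spinlock_t lock = { 0 };        /* debug fields exist only when DEBUG_SPINLOCKS != 0 */
        return (int)lock.taken;
    }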
trunk/kernel/mm/kcm.c
r437 r438 48 48 { 49 49 50 #if CONFIG_DEBUG_KCM50 #if DEBUG_KCM 51 51 uint32_t cycle = (uint32_t)hal_get_cycles(); 52 if( CONFIG_DEBUG_KCM < cycle )52 if( DEBUG_KCM < cycle ) 53 53 printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n", 54 54 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , … … 85 85 + (index * kcm->block_size) ); 86 86 87 #if CONFIG_DEBUG_KCM87 #if DEBUG_KCM 88 88 cycle = (uint32_t)hal_get_cycles(); 89 if( CONFIG_DEBUG_KCM < cycle )89 if( DEBUG_KCM < cycle ) 90 90 printk("\n[DBG] %s : thread %x exit / type %s / ptr %p / page %x / count %d\n", 91 91 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr , -
trunk/kernel/mm/kmem.c
r435 r438 145 145 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" ); 146 146 147 #if CONFIG_DEBUG_KMEM147 #if DEBUG_KMEM 148 148 uint32_t cycle = (uint32_t)hal_get_cycles(); 149 if( CONFIG_DEBUG_KMEM < cycle )149 if( DEBUG_KMEM < cycle ) 150 150 printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n", 151 151 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle ); … … 173 173 hal_fence(); 174 174 175 #if CONFIG_DEBUG_KMEM175 #if DEBUG_KMEM 176 176 cycle = (uint32_t)hal_get_cycles(); 177 if( CONFIG_DEBUG_KMEM < cycle )177 if( DEBUG_KMEM < cycle ) 178 178 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 179 179 __FUNCTION__, CURRENT_THREAD, cycle ); … … 200 200 assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" ); 201 201 202 #if CONFIG_DEBUG_KMEM202 #if DEBUG_KMEM 203 203 uint32_t cycle = (uint32_t)hal_get_cycles(); 204 if( CONFIG_DEBUG_KMEM < cycle )204 if( DEBUG_KMEM < cycle ) 205 205 printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n", 206 206 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle ); … … 222 222 if( flags & AF_ZERO ) page_zero( (page_t *)ptr ); 223 223 224 #if CONFIG_DEBUG_KMEM224 #if DEBUG_KMEM 225 225 cycle = (uint32_t)hal_get_cycles(); 226 if( CONFIG_DEBUG_KMEM < cycle )226 if( DEBUG_KMEM < cycle ) 227 227 printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n", 228 228 __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle ); … … 244 244 if( flags & AF_ZERO ) memset( ptr , 0 , size ); 245 245 246 #if CONFIG_DEBUG_KMEM246 #if DEBUG_KMEM 247 247 cycle = (uint32_t)hal_get_cycles(); 248 if( CONFIG_DEBUG_KMEM < cycle )248 if( DEBUG_KMEM < cycle ) 249 249 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n", 250 250 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle ); … … 275 275 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) ); 276 276 277 #if CONFIG_DEBUG_KMEM277 #if DEBUG_KMEM 278 278 cycle = (uint32_t)hal_get_cycles(); 279 if( CONFIG_DEBUG_KMEM < cycle )279 if( DEBUG_KMEM < cycle ) 280 280 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n", 281 281 __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr, -
trunk/kernel/mm/mapper.c
r435 r438 143 143 error_t error; 144 144 145 #if CONFIG_DEBUG_MAPPER_GET_PAGE145 #if DEBUG_MAPPER_GET_PAGE 146 146 uint32_t cycle = (uint32_t)hal_get_cycles(); 147 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )147 if( DEBUG_MAPPER_GET_PAGE < cycle ) 148 148 printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n", 149 149 __FUNCTION__ , CURRENT_THREAD , index , mapper , cycle ); … … 175 175 { 176 176 177 #if ( CONFIG_DEBUG_MAPPER_GET_PAGE & 1)178 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )177 #if (DEBUG_MAPPER_GET_PAGE & 1) 178 if( DEBUG_MAPPER_GET_PAGE < cycle ) 179 179 printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ ); 180 180 #endif … … 257 257 } 258 258 259 #if CONFIG_DEBUG_MAPPER_GET_PAGE259 #if DEBUG_MAPPER_GET_PAGE 260 260 cycle = (uint32_t)hal_get_cycles(); 261 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )261 if( DEBUG_MAPPER_GET_PAGE < cycle ) 262 262 printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n", 263 263 __FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle ); … … 317 317 uint8_t * buf_ptr; // current buffer address 318 318 319 #if CONFIG_DEBUG_MAPPER_MOVE_USER319 #if DEBUG_MAPPER_MOVE_USER 320 320 uint32_t cycle = (uint32_t)hal_get_cycles(); 321 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )321 if( DEBUG_MAPPER_MOVE_USER < cycle ) 322 322 printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n", 323 323 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle ); … … 347 347 else page_count = CONFIG_PPM_PAGE_SIZE; 348 348 349 #if ( CONFIG_DEBUG_MAPPER_MOVE_USER & 1)350 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )349 #if (DEBUG_MAPPER_MOVE_USER & 1) 350 if( DEBUG_MAPPER_MOVE_USER < cycle ) 351 351 printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n", 352 352 __FUNCTION__ , index , page_offset , page_count ); … … 379 379 } 380 380 381 #if CONFIG_DEBUG_MAPPER_MOVE_USER381 #if DEBUG_MAPPER_MOVE_USER 382 382 cycle = (uint32_t)hal_get_cycles(); 383 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )383 if( DEBUG_MAPPER_MOVE_USER < cycle ) 384 384 printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n", 385 385 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle ); … … 412 412 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp ); 413 413 414 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL414 #if DEBUG_MAPPER_MOVE_KERNEL 415 415 uint32_t cycle = (uint32_t)hal_get_cycles(); 416 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )416 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 417 417 printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n", 418 418 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle ); … … 427 427 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT; 428 428 429 #if ( CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)430 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )429 #if (DEBUG_MAPPER_MOVE_KERNEL & 1) 430 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 431 431 printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last ); 432 432 #endif … … 459 459 else page_count = CONFIG_PPM_PAGE_SIZE; 460 460 461 #if ( CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)462 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )461 #if (DEBUG_MAPPER_MOVE_KERNEL & 1) 462 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 463 463 printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n", 464 464 __FUNCTION__ , index , page_offset , page_count ); … … 494 494 } 495 495 496 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL496 #if DEBUG_MAPPER_MOVE_KERNEL 497 497 cycle = 
(uint32_t)hal_get_cycles(); 498 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )498 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 499 499 printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n", 500 500 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle ); -
trunk/kernel/mm/ppm.c
r437 r438 201 201 uint32_t current_size; 202 202 203 #if CONFIG_DEBUG_PPM_ALLOC_PAGES203 #if DEBUG_PPM_ALLOC_PAGES 204 204 uint32_t cycle = (uint32_t)hal_get_cycles(); 205 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )205 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 206 206 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n", 207 207 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle ); 208 208 #endif 209 209 210 #if( CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)211 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )210 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 211 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 212 212 ppm_print(); 213 213 #endif … … 239 239 spinlock_unlock( &ppm->free_lock ); 240 240 241 #if CONFIG_DEBUG_PPM_ALLOC_PAGES241 #if DEBUG_PPM_ALLOC_PAGES 242 242 cycle = (uint32_t)hal_get_cycles(); 243 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )243 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 244 244 printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n", 245 245 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle ); … … 275 275 spinlock_unlock( &ppm->free_lock ); 276 276 277 #if CONFIG_DEBUG_PPM_ALLOC_PAGES277 #if DEBUG_PPM_ALLOC_PAGES 278 278 cycle = (uint32_t)hal_get_cycles(); 279 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )279 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 280 280 printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n", 281 281 __FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle ); … … 292 292 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 293 293 294 #if CONFIG_DEBUG_PPM_FREE_PAGES294 #if DEBUG_PPM_FREE_PAGES 295 295 uint32_t cycle = (uint32_t)hal_get_cycles(); 296 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )296 if( DEBUG_PPM_FREE_PAGES < cycle ) 297 297 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n", 298 298 __FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle ); 299 299 #endif 300 300 301 #if( CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)302 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )301 #if(DEBUG_PPM_FREE_PAGES & 0x1) 302 if( DEBUG_PPM_FREE_PAGES < cycle ) 303 303 ppm_print(); 304 304 #endif … … 312 312 spinlock_unlock( &ppm->free_lock ); 313 313 314 #if CONFIG_DEBUG_PPM_FREE_PAGES314 #if DEBUG_PPM_FREE_PAGES 315 315 cycle = (uint32_t)hal_get_cycles(); 316 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )316 if( DEBUG_PPM_FREE_PAGES < cycle ) 317 317 printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n", 318 318 __FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); -
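ppm.c also shows the second tracing level used in several of these files: the low bit of the switch, tested as (DEBUG_PPM_ALLOC_PAGES & 0x1), adds a full allocator dump on top of the entry/exit trace. A stand-alone sketch with ppm_print() and the cycle counter stubbed; the threshold value is arbitrary (an odd value enables the dump):

    #include <stdint.h>
    #include <stdio.h>

    #define DEBUG_PPM_ALLOC_PAGES 101   /* threshold; the odd value also sets the verbose bit */

    static uint32_t hal_get_cycles( void ) { static uint32_t c = 0; return c += 100; }
    static void ppm_print( void )          { printf("[DBG] full ppm state dump\n"); }

    static void ppm_alloc_pages( unsigned order )
    {
    #if DEBUG_PPM_ALLOC_PAGES
        uint32_t cycle = (uint32_t)hal_get_cycles();
        if( DEBUG_PPM_ALLOC_PAGES < cycle )
            printf("\n[DBG] %s : enter for %u page(s) / cycle %u\n", __func__, 1u << order, cycle );
    #endif

    #if (DEBUG_PPM_ALLOC_PAGES & 0x1)
        if( DEBUG_PPM_ALLOC_PAGES < cycle )
            ppm_print();                /* extra dump only when the low bit is set */
    #endif
        /* ... buddy allocation would happen here ... */
    }

    int main( void )
    {
        for( int i = 0; i < 3; i++ ) ppm_alloc_pages( 2 );
        return 0;
    }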
trunk/kernel/mm/vmm.c
r437 r438 63 63 intptr_t size; 64 64 65 #if CONFIG_DEBUG_VMM_INIT65 #if DEBUG_VMM_INIT 66 66 uint32_t cycle = (uint32_t)hal_get_cycles(); 67 if( CONFIG_DEBUG_VMM_INIT )67 if( DEBUG_VMM_INIT ) 68 68 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 69 69 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 183 183 hal_fence(); 184 184 185 #if CONFIG_DEBUG_VMM_INIT185 #if DEBUG_VMM_INIT 186 186 cycle = (uint32_t)hal_get_cycles(); 187 if( CONFIG_DEBUG_VMM_INIT )187 if( DEBUG_VMM_INIT ) 188 188 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n", 189 189 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle ); … … 266 266 lpid_t owner_lpid; 267 267 268 #if CONFIG_DEBUG_VMM_UPDATE_PTE268 #if DEBUG_VMM_UPDATE_PTE 269 269 uint32_t cycle = (uint32_t)hal_get_cycles(); 270 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )270 if( DEBUG_VMM_UPDATE_PTE < cycle ) 271 271 printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n", 272 272 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); … … 292 292 remote_process_cxy = GET_CXY( remote_process_xp ); 293 293 294 #if ( CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)295 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )294 #if (DEBUG_VMM_UPDATE_PTE & 0x1) 295 if( DEBUG_VMM_UPDATE_PTE < cycle ) 296 296 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 297 297 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); … … 305 305 } 306 306 307 #if CONFIG_DEBUG_VMM_UPDATE_PTE307 #if DEBUG_VMM_UPDATE_PTE 308 308 cycle = (uint32_t)hal_get_cycles(); 309 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )309 if( DEBUG_VMM_UPDATE_PTE < cycle ) 310 310 printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n", 311 311 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); … … 338 338 lpid_t owner_lpid; 339 339 340 #if CONFIG_DEBUG_VMM_SET_COW340 #if DEBUG_VMM_SET_COW 341 341 uint32_t cycle = (uint32_t)hal_get_cycles(); 342 if( CONFIG_DEBUG_VMM_SET_COW < cycle )342 if( DEBUG_VMM_SET_COW < cycle ) 343 343 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 344 344 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 370 370 remote_process_cxy = GET_CXY( remote_process_xp ); 371 371 372 #if ( CONFIG_DEBUG_VMM_SET_COW &0x1)373 if( CONFIG_DEBUG_VMM_SET_COW < cycle )372 #if (DEBUG_VMM_SET_COW &0x1) 373 if( DEBUG_VMM_SET_COW < cycle ) 374 374 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 375 375 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); … … 394 394 vpn_t vpn_size = vseg->vpn_size; 395 395 396 #if ( CONFIG_DEBUG_VMM_SET_COW & 0x1)397 if( CONFIG_DEBUG_VMM_SET_COW < cycle )396 #if (DEBUG_VMM_SET_COW & 0x1) 397 if( DEBUG_VMM_SET_COW < cycle ) 398 398 printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n", 399 399 __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size ); … … 445 445 } // end loop on process copies 446 446 447 #if CONFIG_DEBUG_VMM_SET_COW447 #if DEBUG_VMM_SET_COW 448 448 cycle = (uint32_t)hal_get_cycles(); 449 if( CONFIG_DEBUG_VMM_SET_COW < cycle )449 if( DEBUG_VMM_SET_COW < cycle ) 450 450 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 451 451 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 480 480 ppn_t ppn; 481 481 482 #if CONFIG_DEBUG_VMM_FORK_COPY482 #if DEBUG_VMM_FORK_COPY 483 483 uint32_t cycle = (uint32_t)hal_get_cycles(); 484 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )484 if( 
DEBUG_VMM_FORK_COPY < cycle ) 485 485 printk("\n[DBG] %s : thread %x enter / cycle %d\n", 486 486 __FUNCTION__ , CURRENT_THREAD, cycle ); … … 530 530 type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) ); 531 531 532 #if CONFIG_DEBUG_VMM_FORK_COPY532 #if DEBUG_VMM_FORK_COPY 533 533 cycle = (uint32_t)hal_get_cycles(); 534 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )534 if( DEBUG_VMM_FORK_COPY < cycle ) 535 535 printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n", 536 536 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type), … … 556 556 vseg_attach( child_vmm , child_vseg ); 557 557 558 #if CONFIG_DEBUG_VMM_FORK_COPY558 #if DEBUG_VMM_FORK_COPY 559 559 cycle = (uint32_t)hal_get_cycles(); 560 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )560 if( DEBUG_VMM_FORK_COPY < cycle ) 561 561 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", 562 562 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type), … … 597 597 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 598 598 599 #if CONFIG_DEBUG_VMM_FORK_COPY599 #if DEBUG_VMM_FORK_COPY 600 600 cycle = (uint32_t)hal_get_cycles(); 601 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )601 if( DEBUG_VMM_FORK_COPY < cycle ) 602 602 printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n", 603 603 __FUNCTION__ , CURRENT_THREAD , vpn , cycle ); … … 649 649 hal_fence(); 650 650 651 #if CONFIG_DEBUG_VMM_FORK_COPY651 #if DEBUG_VMM_FORK_COPY 652 652 cycle = (uint32_t)hal_get_cycles(); 653 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )653 if( DEBUG_VMM_FORK_COPY < cycle ) 654 654 printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n", 655 655 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 666 666 vseg_t * vseg; 667 667 668 #if CONFIG_DEBUG_VMM_DESTROY668 #if DEBUG_VMM_DESTROY 669 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 if( CONFIG_DEBUG_VMM_DESTROY < cycle )670 if( DEBUG_VMM_DESTROY < cycle ) 671 671 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 672 672 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 673 673 #endif 674 674 675 #if ( CONFIG_DEBUG_VMM_DESTROY & 1 )675 #if (DEBUG_VMM_DESTROY & 1 ) 676 676 vmm_display( process , true ); 677 677 #endif … … 694 694 vseg = GET_PTR( vseg_xp ); 695 695 696 #if( CONFIG_DEBUG_VMM_DESTROY & 1 )697 if( CONFIG_DEBUG_VMM_DESTROY < cycle )696 #if( DEBUG_VMM_DESTROY & 1 ) 697 if( DEBUG_VMM_DESTROY < cycle ) 698 698 printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n", 699 699 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); … … 728 728 hal_gpt_destroy( &vmm->gpt ); 729 729 730 #if CONFIG_DEBUG_VMM_DESTROY730 #if DEBUG_VMM_DESTROY 731 731 cycle = (uint32_t)hal_get_cycles(); 732 if( CONFIG_DEBUG_VMM_DESTROY < cycle )732 if( DEBUG_VMM_DESTROY < cycle ) 733 733 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 734 734 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 882 882 error_t error; 883 883 884 #if CONFIG_DEBUG_VMM_CREATE_VSEG884 #if DEBUG_VMM_CREATE_VSEG 885 885 uint32_t cycle = (uint32_t)hal_get_cycles(); 886 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )886 if( DEBUG_VMM_CREATE_VSEG < cycle ) 887 887 printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n", 888 888 __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle ); … … 973 973 remote_rwlock_wr_unlock( lock_xp ); 974 974 975 #if CONFIG_DEBUG_VMM_CREATE_VSEG975 #if DEBUG_VMM_CREATE_VSEG 976 976 cycle = (uint32_t)hal_get_cycles(); 977 if( 
CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )977 if( DEBUG_VMM_CREATE_VSEG < cycle ) 978 978 printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n", 979 979 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle ); … … 1110 1110 uint32_t count; // actual number of pendinf forks 1111 1111 1112 #if CONFIG_DEBUG_VMM_UNMAP_VSEG1112 #if DEBUG_VMM_UNMAP_VSEG 1113 1113 uint32_t cycle = (uint32_t)hal_get_cycles(); 1114 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1114 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1115 1115 printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n", 1116 1116 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); … … 1131 1131 { 1132 1132 1133 #if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )1134 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1133 #if( DEBUG_VMM_UNMAP_VSEG & 1 ) 1134 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1135 1135 printk("- vpn %x / ppn %x\n" , vpn , ppn ); 1136 1136 #endif … … 1183 1183 } 1184 1184 1185 #if CONFIG_DEBUG_VMM_UNMAP_VSEG1185 #if DEBUG_VMM_UNMAP_VSEG 1186 1186 cycle = (uint32_t)hal_get_cycles(); 1187 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1187 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1188 1188 printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n", 1189 1189 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); … … 1383 1383 { 1384 1384 1385 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE1386 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )1385 #if DEBUG_VMM_ALLOCATE_PAGE 1386 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1387 1387 printk("\n[DBG] in %s : thread %x enter for vpn %x\n", 1388 1388 __FUNCTION__ , CURRENT_THREAD, vpn ); … … 1427 1427 } 1428 1428 1429 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE1430 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )1429 #if DEBUG_VMM_ALLOCATE_PAGE 1430 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1431 1431 printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n", 1432 1432 __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) ); … … 1452 1452 index = vpn - vseg->vpn_base; 1453 1453 1454 #if CONFIG_DEBUG_VMM_GET_ONE_PPN1455 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1454 #if DEBUG_VMM_GET_ONE_PPN 1455 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1456 1456 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n", 1457 1457 __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index ); … … 1515 1515 uint32_t elf_offset = vseg->file_offset + offset; 1516 1516 1517 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1518 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1517 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1518 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1519 1519 printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n", 1520 1520 __FUNCTION__, CURRENT_THREAD, vpn, elf_offset ); … … 1530 1530 { 1531 1531 1532 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1533 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1532 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1533 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1534 1534 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n", 1535 1535 __FUNCTION__, CURRENT_THREAD, vpn ); … … 1548 1548 { 1549 1549 1550 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1551 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1550 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1551 
if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1552 1552 printk("\n[DBG] %s : thread %x for vpn = %x / fully in mapper\n", 1553 1553 __FUNCTION__, CURRENT_THREAD, vpn ); … … 1580 1580 { 1581 1581 1582 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1583 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1582 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1583 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1584 1584 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n" 1585 1585 " %d bytes from mapper / %d bytes from BSS\n", … … 1627 1627 *ppn = ppm_page2ppn( page_xp ); 1628 1628 1629 #if CONFIG_DEBUG_VMM_GET_ONE_PPN1630 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1629 #if DEBUG_VMM_GET_ONE_PPN 1630 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1631 1631 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n", 1632 1632 __FUNCTION__ , CURRENT_THREAD , vpn , *ppn ); … … 1655 1655 "not called in the reference cluster\n" ); 1656 1656 1657 #if CONFIG_DEBUG_VMM_GET_PTE1657 #if DEBUG_VMM_GET_PTE 1658 1658 uint32_t cycle = (uint32_t)hal_get_cycles(); 1659 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1660 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow =%d / cycle %d\n",1659 if( DEBUG_VMM_GET_PTE < cycle ) 1660 printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n", 1661 1661 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle ); 1662 1662 #endif … … 1675 1675 } 1676 1676 1677 #if CONFIG_DEBUG_VMM_GET_PTE1677 #if( DEBUG_VMM_GET_PTE & 1 ) 1678 1678 cycle = (uint32_t)hal_get_cycles(); 1679 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1679 if( DEBUG_VMM_GET_PTE < cycle ) 1680 1680 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n", 1681 1681 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size ); 1682 1682 #endif 1683 1683 1684 // access GPT to get current PTE attributes and PPN1685 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );1686 1687 // for both "copy_on_write" and "page_fault" events, allocate a physical page,1688 // initialize it, register it in the reference GPT, update GPT copies in all1689 // clusters containing a copy, and return the new_ppn and new_attr1690 1691 if( cow ) /////////////////////////// copy_on_write request //////////////////////1692 { 1684 if( cow ) //////////////// copy_on_write request ////////////////////// 1685 // get PTE from reference GPT 1686 // allocate a new physical page if there are pending forks, 1687 // initialize it from old physical page content, 1688 // update PTE in all GPT copies, 1689 { 1690 // access GPT to get current PTE attributes and PPN 1691 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1692 1693 1693 assert( (old_attr & GPT_MAPPED) , __FUNCTION__ , 1694 1694 "PTE must be mapped for a copy-on-write exception\n" ); 1695 1695 1696 #if CONFIG_DEBUG_VMM_GET_PTE1696 #if( DEBUG_VMM_GET_PTE & 1 ) 1697 1697 cycle = (uint32_t)hal_get_cycles(); 1698 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1698 if( DEBUG_VMM_GET_PTE < cycle ) 1699 1699 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n", 1700 1700 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); … … 1744 1744 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 ); 1745 1745 } 1746 else ////////////////////////////////// page_fault request //////////////////////// 1746 else //////////// page_fault request /////////////////////////// 1747 // get PTE from reference GPT 1748 // allocate a 
physical page if it is a true page fault, 1749 // register in reference GPT, but don't update GPT copies 1747 1750 { 1751 // access GPT to get current PTE 1752 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1753 1748 1754 if( (old_attr & GPT_MAPPED) == 0 ) // true page_fault => map it 1749 1755 { 1750 1756 1751 #if CONFIG_DEBUG_VMM_GET_PTE1757 #if( DEBUG_VMM_GET_PTE & 1 ) 1752 1758 cycle = (uint32_t)hal_get_cycles(); 1753 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1759 if( DEBUG_VMM_GET_PTE < cycle ) 1754 1760 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n", 1755 1761 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); … … 1792 1798 } 1793 1799 1794 #if CONFIG_DEBUG_VMM_GET_PTE1800 #if DEBUG_VMM_GET_PTE 1795 1801 cycle = (uint32_t)hal_get_cycles(); 1796 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1797 printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr =%x / cycle %d\n",1802 if( DEBUG_VMM_GET_PTE < cycle ) 1803 printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n", 1798 1804 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle ); 1799 1805 #endif 1800 1806 1801 // return success1807 // return PPN and flags 1802 1808 *ppn = new_ppn; 1803 1809 *attr = new_attr; … … 1814 1820 error_t error; 1815 1821 1816 #if CONFIG_DEBUG_VMM_GET_PTE1822 #if DEBUG_VMM_GET_PTE 1817 1823 uint32_t cycle = (uint32_t)hal_get_cycles(); 1818 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1824 if( DEBUG_VMM_GET_PTE < cycle ) 1819 1825 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n", 1820 1826 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1854 1860 } 1855 1861 1856 #if CONFIG_DEBUG_VMM_GET_PTE1862 #if DEBUG_VMM_GET_PTE 1857 1863 cycle = (uint32_t)hal_get_cycles(); 1858 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1864 if( DEBUG_VMM_GET_PTE < cycle ) 1859 1865 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1860 1866 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1873 1879 error_t error; 1874 1880 1875 #if CONFIG_DEBUG_VMM_GET_PTE1881 #if DEBUG_VMM_GET_PTE 1876 1882 uint32_t cycle = (uint32_t)hal_get_cycles(); 1877 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1883 if( DEBUG_VMM_GET_PTE < cycle ) 1878 1884 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n", 1879 1885 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1913 1919 } 1914 1920 1915 #if CONFIG_DEBUG_VMM_GET_PTE1921 #if DEBUG_VMM_GET_PTE 1916 1922 cycle = (uint32_t)hal_get_cycles(); 1917 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1923 if( DEBUG_VMM_GET_PTE < cycle ) 1918 1924 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1919 1925 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); -
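The copy-on-write branch restructured above only duplicates the physical page while other forked processes still reference it: the page descriptor's forks counter is decremented (the hal_remote_atomic_add(..., -1) call), and a new page is allocated and initialized from the old one only when that counter is non-zero. The following standalone sketch illustrates just that decision, using hypothetical stand-in types for the kernel's PTE and page descriptor (an illustration of the policy, not the ALMOS-MKH code itself):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* hypothetical stand-ins for the kernel's PTE and page descriptor */
typedef struct { bool mapped; bool writable; uint32_t ppn; } pte_t;
typedef struct { uint32_t forks; } page_t;   /* forks = remaining references from forked processes */

/* sketch of the COW decision made in vmm_get_pte() : duplicate the
 * page only while it is still shared, else just restore write access */
static uint32_t cow_resolve( pte_t * pte , page_t pages[] , uint32_t free_ppn )
{
    page_t * old = &pages[ pte->ppn ];

    if( old->forks > 0 )          /* still referenced by another forked process */
    {
        old->forks--;             /* analogue of hal_remote_atomic_add( forks , -1 ) */
        pte->ppn = free_ppn;      /* map a private copy of the page */
    }
    pte->writable = true;         /* both paths make the PTE writable again */
    return pte->ppn;
}

int main( void )
{
    page_t pages[4] = { { 1 } , { 0 } , { 0 } , { 0 } };
    pte_t  pte = { true , false , 0 };

    printf( "first  write -> ppn %u\n" , cow_resolve( &pte , pages , 3 ) );   /* page duplicated */
    printf( "second write -> ppn %u\n" , cow_resolve( &pte , pages , 2 ) );   /* page kept as is */
    return 0;
}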
trunk/kernel/syscalls/sys_display.c
r436 r438 38 38 { 39 39 40 #if CONFIG_DEBUG_SYS_DISPLAY40 #if DEBUG_SYS_DISPLAY 41 41 uint64_t tm_start; 42 42 uint64_t tm_end; … … 44 44 this = CURRENT_THREAD; 45 45 tm_start = hal_get_cycles(); 46 if( CONFIG_DEBUG_SYS_DISPLAY < tm_start )46 if( DEBUG_SYS_DISPLAY < tm_start ) 47 47 printk("\n[DBG] %s : thread %d enter / process %x / type %d / cycle = %d\n", 48 48 __FUNCTION__, this, this->process->pid, type, (uint32_t)tm_start ); … … 183 183 } 184 184 185 #if CONFIG_DEBUG_SYS_DISPLAY185 #if DEBUG_SYS_DISPLAY 186 186 tm_end = hal_get_cycles(); 187 if( CONFIG_DEBUG_SYS_DISPLAY < tm_end )187 if( DEBUG_SYS_DISPLAY < tm_end ) 188 188 printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n", 189 189 __FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_exec.c
r435 r438 193 193 { 194 194 195 #if CONFIG_DEBUG_SYSCALLS_ERROR195 #if DEBUG_SYSCALLS_ERROR 196 196 printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ ); 197 197 #endif … … 203 203 hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 204 204 205 #if CONFIG_DEBUG_SYS_EXEC205 #if DEBUG_SYS_EXEC 206 206 uint64_t tm_start; 207 207 uint64_t tm_end; 208 208 tm_start = hal_get_cycles(); 209 if( CONFIG_DEBUG_SYS_EXEC < tm_start )209 if( DEBUG_SYS_EXEC < tm_start ) 210 210 printk("\n[DBG] %s : thread %x enter / process %x / path %s / cycle = %d\n", 211 211 __FUNCTION__, this, pid, exec_info.path, (uint32_t)tm_start ); … … 218 218 { 219 219 220 #if CONFIG_DEBUG_SYSCALLS_ERROR220 #if DEBUG_SYSCALLS_ERROR 221 221 printk("\n[ERROR] in %s : cannot access args\n", __FUNCTION__ ); 222 222 #endif … … 232 232 { 233 233 234 #if CONFIG_DEBUG_SYCALLS_ERROR234 #if DEBUG_SYSCALLS_ERROR 235 235 printk("\n[ERROR] in %s : cannot access envs\n", __FUNCTION__ ); 236 236 #endif … … 246 246 { 247 247 248 #if CONFIG_DEBUG_SYSCALLS_ERROR248 #if DEBUG_SYSCALLS_ERROR 249 249 printk("\n[ERROR] in %s : cannot create process %x in cluster %x\n", 250 250 __FUNCTION__, pid, CXY_FROM_PID(pid) ); … … 254 254 } 255 255 256 #if CONFIG_DEBUG_SYS_EXEC256 #if DEBUG_SYS_EXEC 257 257 tm_end = hal_get_cycles(); 258 if( CONFIG_DEBUG_SYS_EXEC < tm_end )258 if( DEBUG_SYS_EXEC < tm_end ) 259 259 printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n", 260 260 __FUNCTION__, this, pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_exit.c
r436 r438 43 43 trdid_t trdid = this->trdid; 44 44 45 #if CONFIG_DEBUG_SYS_EXIT45 #if DEBUG_SYS_EXIT 46 46 uint64_t tm_start; 47 47 uint64_t tm_end; 48 48 tm_start = hal_get_cycles(); 49 if( CONFIG_DEBUG_SYS_EXIT < tm_start )49 if( DEBUG_SYS_EXIT < tm_start ) 50 50 printk("\n[DBG] %s : thread %x enter / process %x / status %x / cycle %d\n", 51 51 __FUNCTION__ , this, pid , status , (uint32_t)tm_start ); … … 59 59 { 60 60 61 #if CONFIG_DEBUG_SYSCALLS_ERROR61 #if DEBUG_SYSCALLS_ERROR 62 62 printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n", 63 63 __FUNCTION__, trdid, owner_cxy ); … … 73 73 process->term_state = status; 74 74 75 #if( CONFIG_DEBUG_SYS_EXIT & 1)75 #if( DEBUG_SYS_EXIT & 1) 76 76 printk("\n[DBG] %s : set exit status in process term_state\n", __FUNCTION__); 77 77 #endif … … 80 80 process_txt_detach( XPTR( local_cxy , process ) ); 81 81 82 #if( CONFIG_DEBUG_SYS_EXIT & 1)82 #if( DEBUG_SYS_EXIT & 1) 83 83 printk("\n[DBG] %s : removed from TXT list\n", __FUNCTION__); 84 84 #endif … … 87 87 process_sigaction( pid , DELETE_ALL_THREADS ); 88 88 89 #if( CONFIG_DEBUG_SYS_EXIT & 1)89 #if( DEBUG_SYS_EXIT & 1) 90 90 printk("\n[DBG] %s : deleted all threads other than main\n", __FUNCTION__); 91 91 #endif … … 97 97 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL ); 98 98 99 #if( CONFIG_DEBUG_SYS_EXIT & 1)99 #if( DEBUG_SYS_EXIT & 1) 100 100 printk("\n[DBG] %s : blocked the main thread\n", __FUNCTION__); 101 101 #endif … … 106 106 PROCESS_TERM_EXIT ); 107 107 108 #if( CONFIG_DEBUG_SYS_EXIT & 1)108 #if( DEBUG_SYS_EXIT & 1) 109 109 printk("\n[DBG] %s : set EXIT flag in process term_state\n", __FUNCTION__); 110 110 #endif … … 112 112 hal_fence(); 113 113 114 #if CONFIG_DEBUG_SYS_EXIT114 #if DEBUG_SYS_EXIT 115 115 tm_end = hal_get_cycles(); 116 if( CONFIG_DEBUG_SYS_EXIT < tm_end )116 if( DEBUG_SYS_EXIT < tm_end ) 117 117 printk("\n[DBG] %s : thread %x exit / process %x / status %x / cost = %d / cycle %d\n", 118 118 __FUNCTION__, this, pid, status, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_fg.c
r436 r438 45 45 thread_t * this = CURRENT_THREAD; 46 46 47 #if CONFIG_DEBUG_SYS_FG47 #if DEBUG_SYS_FG 48 48 uint64_t tm_start; 49 49 uint64_t tm_end; 50 50 tm_start = hal_get_cycles(); 51 if( CONFIG_DEBUG_SYS_FG < tm_start )51 if( DEBUG_SYS_FG < tm_start ) 52 52 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 53 53 __FUNCTION__ , CURRENT_THREAD , pid, (uint32_t)tm_start ); … … 60 60 { 61 61 62 #if CONFIG_DEBUG_SYSCALLS_ERROR62 #if DEBUG_SYSCALLS_ERROR 63 63 printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__ , pid ); 64 64 #endif … … 83 83 hal_fence(); 84 84 85 #if CONFIG_DEBUG_SYS_FG85 #if DEBUG_SYS_FG 86 86 tm_end = hal_get_cycles(); 87 if( CONFIG_DEBUG_SYS_FG < tm_end )87 if( DEBUG_SYS_FG < tm_end ) 88 88 printk("\n[DBG] %s : thread %x exit / process %x get TXT_%d ownership / cycle %d\n", 89 89 __FUNCTION__ , CURRENT_THREAD , pid, -
trunk/kernel/syscalls/sys_fork.c
r435 r438 45 45 pid_t parent_pid; // parent process identifier 46 46 thread_t * parent_thread_ptr; // local pointer on local parent thread descriptor 47 cxy_t parent_cxy; // parent thread cluster 47 48 48 49 pid_t child_pid; // child process identifier 49 50 thread_t * child_thread_ptr; // local pointer on remote child thread descriptor 50 cxy_t target_cxy;// target cluster for forked child process51 cxy_t child_cxy; // target cluster for forked child process 51 52 52 53 xptr_t ref_process_xp; // extended pointer on reference parent process … … 62 63 parent_process_ptr = parent_thread_ptr->process; 63 64 parent_pid = parent_process_ptr->pid; 64 65 #if CONFIG_DEBUG_SYS_FORK 65 parent_cxy = local_cxy; 66 67 #if DEBUG_SYS_FORK 66 68 uint64_t tm_start; 67 69 uint64_t tm_end; 68 70 tm_start = hal_get_cycles(); 69 if( CONFIG_DEBUG_SYS_FORK < tm_start )70 printk("\n[DBG] %s : thread %x enter / parent%x / cycle = %d\n",71 if( DEBUG_SYS_FORK < tm_start ) 72 printk("\n[DBG] %s : parent_thread %x enter / parent_pid %x / cycle = %d\n", 71 73 __FUNCTION__, parent_thread_ptr, parent_pid, (uint32_t)tm_start ); 72 74 #endif 73 75 74 // get infos on reference p rocess76 // get infos on reference parent process 75 77 ref_process_xp = parent_process_ptr->ref_xp; 76 78 ref_process_cxy = GET_CXY( ref_process_xp ); … … 82 84 { 83 85 84 #if CONFIG_DEBUG_SYSCALLS_ERROR86 #if DEBUG_SYSCALLS_ERROR 85 87 printk("\n[ERROR] in %s : too many child processes\n", __FUNCTION__); 86 88 … … 91 93 92 94 // Select target cluster for child process and main thread. 93 // If placement is not user-defined, the placement is defined by the DQDT.94 if( parent_thread_ptr->fork_user ) // user defined placement95 { 96 target_cxy = parent_thread_ptr->fork_cxy;95 // If placement is not user-defined, it is defined by the DQDT. 96 if( parent_thread_ptr->fork_user ) 97 { 98 child_cxy = parent_thread_ptr->fork_cxy; 97 99 parent_thread_ptr->fork_user = false; 98 100 } 99 101 else // DQDT placement 100 102 { 101 target_cxy = dqdt_get_cluster_for_process(); 102 } 103 child_cxy = dqdt_get_cluster_for_process(); 104 } 105 106 #if( DEBUG_SYS_FORK & 1) 107 108 // dqdt_display(); 109 110 if( local_cxy == 0 ) 111 { 112 sched_display( 0 ); 113 rpc_sched_display_client( 1 , 0 ); 114 } 115 else 116 { 117 sched_display( 0 ); 118 rpc_sched_display_client( 0 , 0 ); 119 } 120 121 if( DEBUG_SYS_FORK < tm_start ) 122 printk("\n[DBG] %s : parent_thread %x selected cluster %x\n", 123 __FUNCTION__, parent_thread_ptr, child_cxy ); 124 #endif 103 125 104 126 // call process_make_fork in target cluster 105 if( target_cxy == local_cxy )127 if( child_cxy == local_cxy ) 106 128 { 107 129 error = process_make_fork( ref_process_xp, … … 112 134 else 113 136 { 114 rpc_process_make_fork_client( target_cxy,136 rpc_process_make_fork_client( child_cxy, 115 137 ref_process_xp, 116 138 parent_thread_xp, … … 123 145 { 124 146 125 #if CONFIG_DEBUG_SYSCALLS_ERROR147 #if DEBUG_SYSCALLS_ERROR 126 148 printk("\n[ERROR] in %s : cannot fork process %x in cluster %x\n", 127 149 __FUNCTION__, parent_pid, local_cxy ); … … 135 157 if( CURRENT_THREAD->core->fpu_owner == parent_thread_ptr ) 136 158 { 137 hal_fpu_context_save( XPTR( target_cxy , child_thread_ptr ) ); 138 } 139 140 // set remote child CPU context from parent_thread register values 141 hal_cpu_context_fork( XPTR( target_cxy , child_thread_ptr ) ); 142 143 // From this point, both parent and child threads execute the following code. 
144 // They can be distinguished by the CURRENT_THREAD value, and child will only 145 // execute it when it is unblocked by parent. 159 hal_fpu_context_save( XPTR( child_cxy , child_thread_ptr ) ); 160 161 } 162 163 // set remote child CPU context from parent_thread register values 164 hal_cpu_context_fork( XPTR( child_cxy , child_thread_ptr ) ); 165 166 // From this point, both parent and child threads execute the following code, 167 // but they can be distinguished by the (CURRENT_THREAD,local_cxy) values. 146 168 // - parent unblocks child, and returns child PID to user application. 147 169 // - child thread does nothing, and returns 0 to user application 170 // The child thread will only execute it when it is unblocked by the parent thread. 148 171 149 172 thread_t * current = CURRENT_THREAD; 150 173 151 if( current == parent_thread_ptr ) // current ==parent thread 174 if( (current == parent_thread_ptr) && (local_cxy == parent_cxy) ) // parent thread 152 175 { 153 176 // parent_thread unblock child_thread 154 thread_unblock( XPTR( target_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL );155 156 #if CONFIG_DEBUG_SYS_FORK176 thread_unblock( XPTR( child_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL ); 177 178 #if DEBUG_SYS_FORK 157 179 tm_end = hal_get_cycles(); 158 if( CONFIG_DEBUG_SYS_FORK < tm_end )159 printk("\n[DBG] %s : parent_thread %x exit / cost = %d / cycle %d\n",160 __FUNCTION__ , parent_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );180 if( DEBUG_SYS_FORK < tm_end ) 181 printk("\n[DBG] %s : parent_thread %x on cluster %x exit / cost = %d / cycle %d\n", 182 __FUNCTION__, parent_thread_ptr, parent_cxy, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 161 183 #endif 162 184 163 185 return child_pid; 164 186 } 165 else // current ==child_thread166 { 167 168 #if CONFIG_DEBUG_SYS_FORK187 else // child_thread 188 { 189 190 #if DEBUG_SYS_FORK 169 191 tm_end = hal_get_cycles(); 170 if( CONFIG_DEBUG_SYS_FORK < tm_end )171 printk("\n[DBG] %s : child_thread %x exit / cost = %d / cycle %d\n",172 __FUNCTION__ , child_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );192 if( DEBUG_SYS_FORK < tm_end ) 193 printk("\n[DBG] %s : child_thread %x on cluster %x exit / cost = %d / cycle %d\n", 194 __FUNCTION__, child_thread_ptr, child_cxy, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 173 195 #endif 174 196 -
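The rewritten comment above is the classic fork pattern: after hal_cpu_context_fork() the parent and the child resume in the same code, and only the (CURRENT_THREAD, local_cxy) pair tells them apart; the extra local_cxy test added by this changeset is needed because the child runs in another cluster, where a thread pointer equal to the parent's could designate a different thread. A user-level analogue of the same single-code-path / two-return-values structure, using nothing beyond POSIX fork() (a didactic sketch, not the kernel code):

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

/* both parent and child execute this code after fork();
 * the return value plays the role of the (CURRENT_THREAD, local_cxy) test */
int main( void )
{
    pid_t ret = fork();

    if( ret > 0 )            /* parent path : gets the child PID */
    {
        printf( "parent : created child %d\n" , (int)ret );
        waitpid( ret , NULL , 0 );
    }
    else if( ret == 0 )      /* child path : gets 0 */
    {
        printf( "child : running\n" );
    }
    else                     /* fork failed */
    {
        perror( "fork" );
        return 1;
    }
    return 0;
}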
trunk/kernel/syscalls/sys_get_config.c
r436 r438 47 47 process_t * process = this->process; 48 48 49 #if CONFIG_DEBUG_SYS_GET_CONFIG49 #if DEBUG_SYS_GET_CONFIG 50 50 uint64_t tm_start; 51 51 uint64_t tm_end; 52 52 tm_start = hal_get_cycles(); 53 if( CONFIG_DEBUG_SYS_GET_CONFIG < tm_start )53 if( DEBUG_SYS_GET_CONFIG < tm_start ) 54 54 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 55 55 __FUNCTION__, this, process->pid, (uint32_t)tm_start ); … … 64 64 { 65 65 66 #if CONFIG_DEBUG_SYSCALLS_ERROR66 #if DEBUG_SYSCALLS_ERROR 67 67 printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n", 68 68 __FUNCTION__ , this->trdid , process->pid ); … … 84 84 hal_fence(); 85 85 86 #if CONFIG_DEBUG_SYS_GET_CONFIG86 #if DEBUG_SYS_GET_CONFIG 87 87 tm_end = hal_get_cycles(); 88 if( CONFIG_DEBUG_SYS_GET_CONFIG < tm_end )88 if( DEBUG_SYS_GET_CONFIG < tm_end ) 89 89 printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n", 90 90 __FUNCTION__, this, process->pid, (uint32_t)(tm_end-tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_isatty.c
r437 r438 48 48 process_t * process = this->process; 49 49 50 #if CONFIG_DEBUG_SYS_ISATTY50 #if DEBUG_SYS_ISATTY 51 51 uint64_t tm_start; 52 52 uint64_t tm_end; 53 53 tm_start = hal_get_cycles(); 54 if( CONFIG_DEBUG_SYS_ISATTY < tm_start )54 if( DEBUG_SYS_ISATTY < tm_start ) 55 55 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 56 56 __FUNCTION__ , this, process->pid, (uint32_t)tm_start ); … … 61 61 { 62 62 63 #if CONFIG_DEBUG_SYSCALLS_ERROR63 #if DEBUG_SYSCALLS_ERROR 64 64 printk("\n[ERROR] in %s : illegal file descriptor index = %d\n", __FUNCTION__ , file_id ); 65 65 #endif … … 74 74 { 75 75 76 #if CONFIG_DEBUG_SYSCALLS_ERROR76 #if DEBUG_SYSCALLS_ERROR 77 77 printk("\n[ERROR] in %s : undefined fd_id %d in process %x\n", 78 78 __FUNCTION__ , file_id , process->pid ); … … 108 108 } 109 109 110 #if CONFIG_DEBUG_SYS_ISATTY110 #if DEBUG_SYS_ISATTY 111 111 tm_end = hal_get_cycles(); 112 if( CONFIG_DEBUG_SYS_ISATTY < tm_end )112 if( DEBUG_SYS_ISATTY < tm_end ) 113 113 printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n", 114 114 __FUNCTION__, this, process->pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_kill.c
r436 r438 50 50 trdid_t trdid = this->trdid; 51 51 52 #if CONFIG_DEBUG_SYS_KILL52 #if DEBUG_SYS_KILL 53 53 uint64_t tm_start; 54 54 uint64_t tm_end; 55 55 tm_start = hal_get_cycles(); 56 if( CONFIG_DEBUG_SYS_KILL < tm_start )56 if( DEBUG_SYS_KILL < tm_start ) 57 57 printk("\n[DBG] %s : thread %x enter / process %x / sig %d / cycle %d\n", 58 58 __FUNCTION__ , this, pid, sig_id, (uint32_t)tm_start ); … … 68 68 { 69 69 70 #if CONFIG_DEBUG_SYSCALLS_ERROR70 #if DEBUG_SYSCALLS_ERROR 71 71 printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__, pid ); 72 72 #endif … … 79 79 { 80 80 81 #if CONFIG_DEBUG_SYSCALLS_ERROR81 #if DEBUG_SYSCALLS_ERROR 82 82 printk("\n[ERROR] in %s : only main thread can kill itself\n", __FUNCTION__ ); 83 83 #endif … … 96 96 { 97 97 98 #if CONFIG_DEBUG_SYSCALLS_ERROR98 #if DEBUG_SYSCALLS_ERROR 99 99 printk("\n[ERROR] in %s : process_init cannot be killed\n", __FUNCTION__ ); 100 100 #endif … … 167 167 { 168 168 169 #if CONFIG_DEBUG_SYSCALLS_ERROR169 #if DEBUG_SYSCALLS_ERROR 170 170 printk("\n[ERROR] in %s : illegal signal %d / process %x\n", __FUNCTION__, sig_id, pid ); 171 171 #endif … … 178 178 hal_fence(); 179 179 180 #if CONFIG_DEBUG_SYS_KILL180 #if DEBUG_SYS_KILL 181 181 tm_end = hal_get_cycles(); 182 if( CONFIG_DEBUG_SYS_KILL < tm_end )182 if( DEBUG_SYS_KILL < tm_end ) 183 183 printk("\n[DBG] %s : thread %x exit / process %x / sig %d / cost = %d / cycle %d\n", 184 184 __FUNCTION__ , this, pid, sig_id, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_mmap.c
r435 r438 50 50 process_t * process = this->process; 51 51 52 #if CONFIG_DEBUG_SYS_MMAP52 #if DEBUG_SYS_MMAP 53 53 uint64_t tm_start; 54 54 uint64_t tm_end; 55 55 tm_start = hal_get_cycles(); 56 if ( CONFIG_DEBUG_SYS_MMAP < tm_start )56 if ( DEBUG_SYS_MMAP < tm_start ) 57 57 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 58 58 __FUNCTION__, this, process->pid, (uint32_t)tm_start ); … … 65 65 { 66 66 67 #if CONFIG_DEBUG_SYSCALLS_ERROR67 #if DEBUG_SYSCALLS_ERROR 68 68 printk("\n[ERROR] in %s : arguments not in user space = %x\n", __FUNCTION__ , (intptr_t)attr ); 69 69 #endif … … 91 91 { 92 92 93 #if CONFIG_DEBUG_SYSCALLS_ERROR93 #if DEBUG_SYSCALLS_ERROR 94 94 printk("\n[ERROR] in %s : MAP_FIXED not supported\n", __FUNCTION__ ); 95 95 #endif … … 101 101 { 102 102 103 #if CONFIG_DEBUG_SYSCALLS_ERROR103 #if DEBUG_SYSCALLS_ERROR 104 104 printk("\n[ERROR] in %s : MAP_SHARED xor MAP_PRIVATE\n", __FUNCTION__ ); 105 105 #endif … … 123 123 { 124 124 125 #if CONFIG_DEBUG_SYSCALLS_ERROR125 #if DEBUG_SYSCALLS_ERROR 126 126 printk("\n[ERROR] in %s: bad file descriptor = %d\n", __FUNCTION__ , fdid ); 127 127 #endif … … 136 136 { 137 137 138 #if CONFIG_DEBUG_SYSCALLS_ERROR138 #if DEBUG_SYSCALLS_ERROR 139 139 printk("\n[ERROR] in %s: file %d not found\n", __FUNCTION__ , fdid ); 140 140 #endif … … 159 159 { 160 160 161 #if CONFIG_DEBUG_SYSCALLS_ERROR161 #if DEBUG_SYSCALLS_ERROR 162 162 printk("\n[ERROR] in %s: offset (%d) + len (%d) >= file's size (%d)\n", 163 163 __FUNCTION__, k_attr.offset, k_attr.length, size ); … … 172 172 { 173 173 174 #if CONFIG_DEBUG_SYSCALLS_ERROR174 #if DEBUG_SYSCALLS_ERROR 175 175 printk("\n[ERROR] in %s: prot = %x / file_attr = %x)\n", 176 176 __FUNCTION__ , k_attr.prot , file_attr ); … … 205 205 { 206 206 207 #if CONFIG_DEBUG_SYSCALLS_ERROR207 #if DEBUG_SYSCALLS_ERROR 208 208 printk("\n[ERROR] in %s : illegal cxy for MAP_REMOTE\n", __FUNCTION__ ); 209 209 #endif … … 254 254 { 255 255 256 #if CONFIG_DEBUG_SYSCALLS_ERROR256 #if DEBUG_SYSCALLS_ERROR 257 257 printk("\n[ERROR] in %s : cannot create vseg\n", __FUNCTION__ ); 258 258 #endif … … 266 266 hal_fence(); 267 267 268 #if CONFIG_DEBUG_SYS_MMAP268 #if DEBUG_SYS_MMAP 269 269 tm_end = hal_get_cycles(); 270 if ( CONFIG_DEBUG_SYS_MMAP < tm_start )270 if ( DEBUG_SYS_MMAP < tm_end ) 271 271 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n" 272 272 "vseg %s / cluster %x / base %x / size %x / cost %d\n", -
trunk/kernel/syscalls/sys_munmap.c
r437 r438 43 43 process_t * process = this->process; 44 44 45 #if CONFIG_DEBUG_SYS_MUNMAP45 #if DEBUG_SYS_MUNMAP 46 46 uint64_t tm_start; 47 47 uint64_t tm_end; 48 48 tm_start = hal_get_cycles(); 49 if( CONFIG_DEBUG_SYS_MUNMAP < tm_start )49 if( DEBUG_SYS_MUNMAP < tm_start ) 50 50 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 51 51 __FUNCTION__ , this, process->pid, (uint32_t)tm_start ); … … 58 58 { 59 59 60 #if CONFIG_DEBUG_SYSCALLS_ERROR60 #if DEBUG_SYSCALLS_ERROR 61 61 printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ ); 62 62 #endif … … 65 65 } 66 66 67 #if CONFIG_DEBUG_SYS_MUNMAP67 #if DEBUG_SYS_MUNMAP 68 68 tm_end = hal_get_cycles(); 69 if( CONFIG_DEBUG_SYS_MUNMAP < tm_start )69 if( DEBUG_SYS_MUNMAP < tm_end ) 70 70 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 71 71 __FUNCTION__ , this, process->pid, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_read.c
r436 r438 66 66 process_t * process = this->process; 67 67 68 #if CONFIG_DEBUG_SYS_READ68 #if DEBUG_SYS_READ 69 69 uint64_t tm_start; 70 70 uint64_t tm_end; 71 71 tm_start = hal_get_cycles(); 72 if( CONFIG_DEBUG_SYS_READ < tm_start )72 if( DEBUG_SYS_READ < tm_start ) 73 73 printk("\n[DBG] %s : thread %x enter / process %x / vaddr = %x / count %d / cycle %d\n", 74 74 __FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start ); 75 75 #endif 76 76 77 #if ( CONFIG_DEBUG_SYS_READ & 1)77 #if (DEBUG_SYS_READ & 1) 78 78 enter_sys_read = (uint32_t)tm_start; 79 79 #endif … … 83 83 { 84 84 85 #if CONFIG_DEBUG_SYSCALLS_ERROR85 #if DEBUG_SYSCALLS_ERROR 86 86 printk("\n[ERROR] in %s : illegal file descriptor index = %d\n", __FUNCTION__ , file_id ); 87 87 #endif … … 96 96 { 97 97 98 #if CONFIG_DEBUG_SYSCALLS_ERROR98 #if DEBUG_SYSCALLS_ERROR 99 99 printk("\n[ERROR] in %s : user buffer unmapped = %x\n", 100 100 __FUNCTION__ , (intptr_t)vaddr ); … … 113 113 { 114 114 115 #if CONFIG_DEBUG_SYSCALLS_ERROR115 #if DEBUG_SYSCALLS_ERROR 116 116 printk("\n[ERROR] in %s : undefined fd_id %d in process %x\n", 117 117 __FUNCTION__ , file_id , process->pid ); … … 130 130 { 131 131 132 #if CONFIG_DEBUG_SYSCALLS_ERROR132 #if DEBUG_SYSCALLS_ERROR 133 133 printk("\n[ERROR] in %s : file %d not readable in process %x\n", 134 134 __FUNCTION__ , file_id , process->pid ); … … 149 149 { 150 150 151 #if CONFIG_DEBUG_SYSCALLS_ERROR151 #if DEBUG_SYSCALLS_ERROR 152 152 printk("\n[ERROR] in %s : file %d not readable in process %x\n", 153 153 __FUNCTION__ , file_id , process->pid ); … … 205 205 { 206 206 207 #if CONFIG_DEBUG_SYSCALLS_ERROR207 #if DEBUG_SYSCALLS_ERROR 208 208 printk("\n[ERROR] in %s cannot read data from file %d in process %x\n", 209 209 __FUNCTION__ , file_id , process->pid ); … … 218 218 hal_fence(); 219 219 220 #if CONFIG_DEBUG_SYS_READ220 #if DEBUG_SYS_READ 221 221 tm_end = hal_get_cycles(); 222 if( CONFIG_DEBUG_SYS_READ < tm_end )222 if( DEBUG_SYS_READ < tm_end ) 223 223 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n" 224 224 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n", … … 228 228 #endif 229 229 230 #if ( CONFIG_DEBUG_SYS_READ & 1)230 #if (DEBUG_SYS_READ & 1) 231 231 exit_sys_read = (uint32_t)tm_end; 232 232 -
trunk/kernel/syscalls/sys_thread_cancel.c
r436 r438 45 45 { 46 46 47 #if CONFIG_DEBUG_SYSCALLS_ERROR47 #if DEBUG_SYSCALLS_ERROR 48 48 printk("\n[ERROR] in %s : target thread %x not found\n", __FUNCTION__, trdid ); 49 49 #endif … … 52 52 } 53 53 54 #if CONFIG_DEBUG_SYS_THREAD_CANCEL54 #if DEBUG_SYS_THREAD_CANCEL 55 55 uint64_t tm_start; 56 56 uint64_t tm_end; 57 57 tm_start = hal_get_cycles(); 58 if( CONFIG_DEBUG_SYS_THREAD_CANCEL < tm_start )58 if( DEBUG_SYS_THREAD_CANCEL < tm_start ) 59 59 printk("\n[DBG] %s : thread %x enter to kill thread %x / cycle %d\n", 60 60 __FUNCTION__, this, GET_PTR( target_xp ), (uint32_t)tm_start ); … … 66 66 0 ); // is forced 67 67 68 #if CONFIG_DEBUG_SYS_THREAD_CANCEL68 #if DEBUG_SYS_THREAD_CANCEL 69 69 tm_end = hal_get_cycles(); 70 if( CONFIG_DEBUG_SYS_THREAD_CANCEL < tm_end )70 if( DEBUG_SYS_THREAD_CANCEL < tm_end ) 71 71 printk("\n[DBG] %s : thread %x exit after kill thread %x / cycle %d\n", 72 72 __FUNCTION__, this, GET_PTR( target_xp ), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_thread_create.c
r437 r438 62 62 process = parent->process; 63 63 64 #if CONFIG_DEBUG_SYS_THREAD_CREATE64 #if DEBUG_SYS_THREAD_CREATE 65 65 uint64_t tm_start; 66 66 uint64_t tm_end; 67 67 tm_start = hal_get_cycles(); 68 if( CONFIG_DEBUG_SYS_THREAD_CREATE < tm_start )68 if( DEBUG_SYS_THREAD_CREATE < tm_start ) 69 69 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 70 70 __FUNCTION__ , parent , process->pid, (uint32_t)tm_start ); … … 79 79 { 80 80 81 #if CONFIG_DEBUG_SYSCALLS_ERROR81 #if DEBUG_SYSCALLS_ERROR 82 82 printk("\n[ERROR] in %s : user_attr unmapped\n", __FUNCTION__ ); 83 83 #endif … … 95 95 { 96 96 97 #if CONFIG_DEBUG_SYSCALLS_ERROR97 #if DEBUG_SYSCALLS_ERROR 98 98 printk("\n[ERROR] in %s : start_func unmapped\n", __FUNCTION__ ); 99 99 #endif … … 108 108 { 109 109 110 #if CONFIG_DEBUG_SYSCALLS_ERROR110 #if DEBUG_SYSCALLS_ERROR 111 111 printk("\n[ERROR] in %s : start_arg unmapped\n", __FUNCTION__ ); 112 112 #endif … … 124 124 { 125 125 126 #if CONFIG_DEBUG_SYSCALLS_ERROR126 #if DEBUG_SYSCALLS_ERROR 127 127 printk("\n[ERROR] in %s : illegal target cluster = %x\n", __FUNCTION__ , kern_attr.cxy ); 128 128 #endif … … 174 174 { 175 175 176 #if CONFIG_DEBUG_SYSCALLS_ERROR176 #if DEBUG_SYSCALLS_ERROR 177 177 printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ ); 178 178 #endif … … 196 196 hal_fence(); 197 197 198 #if CONFIG_DEBUG_SYS_THREAD_CREATE198 #if DEBUG_SYS_THREAD_CREATE 199 199 tm_end = hal_get_cycles(); 200 if( CONFIG_DEBUG_SYS_THREAD_CREATE < tm_end )200 if( DEBUG_SYS_THREAD_CREATE < tm_end ) 201 201 printk("\n[DBG] %s : thread %x created thread %x for process %x in cluster %x / cycle %d\n", 202 202 __FUNCTION__, parent, child_ptr, process->pid, target_cxy, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_thread_exit.c
r436 r438 39 39 { 40 40 41 #if CONFIG_DEBUG_SYSCALLS_ERROR41 #if DEBUG_SYSCALLS_ERROR 42 42 printk("\n[ERROR] in %s : exit_value argument must be NULL for thread %x in process %x\n", 43 43 __FUNCTION__ , exit_value, this->trdid , process->pid ); … … 47 47 } 48 48 49 #if CONFIG_DEBUG_SYS_THREAD_EXIT49 #if DEBUG_SYS_THREAD_EXIT 50 50 uint64_t tm_start; 51 51 uint64_t tm_end; 52 52 tm_start = hal_get_cycles(); 53 if( CONFIG_DEBUG_SYS_THREAD_EXIT < tm_start )53 if( DEBUG_SYS_THREAD_EXIT < tm_start ) 54 54 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 55 55 __FUNCTION__ , this, process->pid , (uint32_t)tm_start ); … … 61 61 0 ); // is forced 62 62 63 #if CONFIG_DEBUG_SYS_THREAD_EXIT63 #if DEBUG_SYS_THREAD_EXIT 64 64 tm_end = hal_get_cycles(); 65 if( CONFIG_DEBUG_SYS_THREAD_EXIT < tm_end )65 if( DEBUG_SYS_THREAD_EXIT < tm_end ) 66 66 printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n", 67 67 __FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_thread_join.c
r436 r438 60 60 target_cxy = CXY_FROM_TRDID( trdid ); 61 61 62 #if CONFIG_DEBUG_SYS_THREAD_JOIN62 #if DEBUG_SYS_THREAD_JOIN 63 63 uint64_t tm_start; 64 64 uint64_t tm_end; 65 65 tm_start = hal_get_cycles(); 66 if( CONFIG_DEBUG_SYS_THREAD_JOIN < tm_start )66 if( DEBUG_SYS_THREAD_JOIN < tm_start ) 67 67 printk("\n[DBG] %s : parent thread %x enter / process %x / target trdid %x / cycle %d\n", 68 68 __FUNCTION__ , joining_ptr , process->pid , trdid , (uint32_t)tm_start ); … … 73 73 { 74 74 75 #if CONFIG_DEBUG_SYSCALLS_ERROR75 #if DEBUG_SYSCALLS_ERROR 76 76 printk("\n[ERROR] in %s : illegal trdid argument %x\n", __FUNCTION__, trdid ); 77 77 #endif … … 84 84 { 85 85 86 #if CONFIG_DEBUG_SYSCALLS_ERROR86 #if DEBUG_SYSCALLS_ERROR 87 87 printk("\n[ERROR] in %s : exit_value argument must be NULL\n", __FUNCTION__ ); 88 88 #endif … … 95 95 { 96 96 97 #if CONFIG_DEBUG_SYSCALLS_ERROR97 #if DEBUG_SYSCALLS_ERROR 98 98 printk("\n[ERROR] in %s : this thread == target thread\n", __FUNCTION__ ); 99 99 #endif … … 109 109 { 110 110 111 #if CONFIG_DEBUG_SYSCALLS_ERROR111 #if DEBUG_SYSCALLS_ERROR 112 112 printk("\n[ERROR] in %s : target thread %x not found\n", __FUNCTION__, trdid ); 113 113 #endif … … 126 126 { 127 127 128 #if CONFIG_DEBUG_SYSCALLS_ERROR128 #if DEBUG_SYSCALLS_ERROR 129 129 printk("\n[ERROR] in %s : target thread %x not joinable\n", __FUNCTION__, trdid ); 130 130 #endif … … 178 178 } 179 179 180 #if CONFIG_DEBUG_SYS_THREAD_JOIN180 #if DEBUG_SYS_THREAD_JOIN 181 181 tm_end = hal_get_cycles(); 182 if( CONFIG_DEBUG_SYS_THREAD_JOIN < tm_end )182 if( DEBUG_SYS_THREAD_JOIN < tm_end ) 183 183 printk("\n[DBG] %s : parent thread %x exit / process %x / target trdid %x / cycle %d\n", 184 184 __FUNCTION__, joining_ptr, process->pid, trdid, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_thread_sleep.c
r436 r438 33 33 thread_t * this = CURRENT_THREAD; 34 34 35 #if CONFIG_DEBUG_SYS_THREAD_SLEEP35 #if DEBUG_SYS_THREAD_SLEEP 36 36 uint64_t tm_start; 37 37 uint64_t tm_end; 38 38 tm_start = hal_get_cycles(); 39 if( CONFIG_DEBUG_SYS_THREAD_SLEEP < tm_start )39 if( DEBUG_SYS_THREAD_SLEEP < tm_start ) 40 40 printk("\n[DBG] %s : thread %x blocked / process %x / cycle %d\n", 41 41 __FUNCTION__ , this, this->process->pid , (uint32_t)tm_start ); … … 45 45 sched_yield("blocked on sleep"); 46 46 47 #if CONFIG_DEBUG_SYS_THREAD_SLEEP47 #if DEBUG_SYS_THREAD_SLEEP 48 48 tm_end = hal_get_cycles(); 49 if( CONFIG_DEBUG_SYS_THREAD_SLEEP < tm_end )49 if( DEBUG_SYS_THREAD_SLEEP < tm_end ) 50 50 printk("\n[DBG] %s : thread %x resume / process %x / cycle %d\n", 51 51 __FUNCTION__ , this, this->process->pid , (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_thread_wakeup.c
r436 r438 34 34 process_t * process = this->process; 35 35 36 #if CONFIG_DEBUG_SYS_THREAD_WAKEUP36 #if DEBUG_SYS_THREAD_WAKEUP 37 37 uint64_t tm_start; 38 38 uint64_t tm_end; 39 39 tm_start = hal_get_cycles(); 40 if( CONFIG_DEBUG_SYS_THREAD_WAKEUP < tm_start )40 if( DEBUG_SYS_THREAD_WAKEUP < tm_start ) 41 41 printk("\n[DBG] %s : thread %x enter / activate thread %x in process %x / cycle %d\n", 42 42 __FUNCTION__ , this, trdid, this->process->pid, (uint32_t)tm_start ); … … 51 51 { 52 52 53 #if CONFIG_DEBUG_SISCALLS_ERROR53 #if DEBUG_SYSCALLS_ERROR 54 54 printk("\n[ERROR] in %s : illegal trdid argument %x\n", __FUNCTION__, trdid ); 55 55 #endif … … 64 64 { 65 65 66 #if CONFIG_DEBUG_SISCALLS_ERROR66 #if DEBUG_SYSCALLS_ERROR 67 67 printk("\n[ERROR] in %s : cannot find thread %x in process %x\n", 68 68 __FUNCTION__ , trdid , this->process->pid ); … … 75 75 thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL ); 76 76 77 #if CONFIG_DEBUG_SYS_THREAD_WAKEUP77 #if DEBUG_SYS_THREAD_WAKEUP 78 78 tm_end = hal_get_cycles(); 79 if( CONFIG_DEBUG_SYS_THREAD_WAKEUP < tm_end )79 if( DEBUG_SYS_THREAD_WAKEUP < tm_end ) 80 80 printk("\n[DBG] %s : thread %x exit / thread %x in process %x activated / cycle %d\n", 81 81 __FUNCTION__ , this, trdid, this->process->pid, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_wait.c
r436 r438 49 49 pid_t pid = process->pid; 50 50 51 #if CONFIG_DEBUG_SYS_WAIT51 #if DEBUG_SYS_WAIT 52 52 uint64_t tm_start; 53 53 uint64_t tm_end; 54 54 tm_start = hal_get_cycles(); 55 if( CONFIG_DEBUG_SYS_WAIT < tm_start )55 if( DEBUG_SYS_WAIT < tm_start ) 56 56 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 57 57 __FUNCTION__, this, process->pid, (uint32_t)tm_start ); … … 64 64 { 65 65 66 #if CONFIG_DEBUG_SYSCALLS_ERROR66 #if DEBUG_SYSCALLS_ERROR 67 67 printk("\n[ERROR] in %s : status buffer unmapped for thread %x in process %x\n", 68 68 __FUNCTION__ , this->trdid , process->pid ); … … 80 80 { 81 81 82 #if CONFIG_DEBUG_SYSCALL_ERROR82 #if DEBUG_SYSCALLS_ERROR 83 83 printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n", 84 84 __FUNCTION__ , trdid , owner_cxy ); … … 135 135 remote_spinlock_unlock( children_lock_xp ); 136 136 137 #if CONFIG_DEBUG_SYS_WAIT137 #if DEBUG_SYS_WAIT 138 138 tm_end = hal_get_cycles(); 139 if( CONFIG_DEBUG_SYS_WAIT < tm_end )139 if( DEBUG_SYS_WAIT < tm_end ) 140 140 printk("\n[DBG] %s : thread %x exit / parent %x / child %x / cycle %d\n", 141 141 __FUNCTION__, this, process->pid, child_pid, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_write.c
r435 r438 46 46 reg_t save_sr; // required to enable IRQs during syscall 47 48 #if ( CONFIG_DEBUG_SYS_WRITE_DEBUG & 1)48 #if (DEBUG_SYS_WRITE & 1) 49 49 enter_sys_write = (uint32_t)tm_start; 50 50 #endif … … 53 53 process_t * process = this->process; 54 54 55 #if CONFIG_DEBUG_SYS_WRITE55 #if DEBUG_SYS_WRITE 56 56 uint32_t tm_start; 57 57 uint32_t tm_end; 58 58 tm_start = hal_get_cycles(); 59 if( CONFIG_DEBUG_SYS_WRITE < tm_start )59 if( DEBUG_SYS_WRITE < tm_start ) 60 60 printk("\n[DBG] %s : thread %x enter / process %x / vaddr %x / count %d / cycle %d\n", 61 61 __FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start ); … … 66 66 { 67 67 68 #if CONFIG_DEBUG_SYSCALLS_ERROR68 #if DEBUG_SYSCALLS_ERROR 69 69 printk("\n[ERROR] in %s : illegal file descriptor index\n", __FUNCTION__ ); 70 70 #endif … … 79 79 { 80 80 81 #if CONFIG_DEBUG_SYSCALLS_ERROR81 #if DEBUG_SYSCALLS_ERROR 82 82 printk("\n[ERROR] in %s : user buffer unmapped = %x\n", __FUNCTION__ , (intptr_t)vaddr ); 83 83 #endif … … 95 95 { 96 96 97 #if CONFIG_DEBUG_SYSCALLS_ERROR97 #if DEBUG_SYSCALLS_ERROR 98 98 printk("\n[ERROR] in %s : undefined file descriptor index = %d in process %x\n", 99 99 __FUNCTION__ , file_id , process->pid ); … … 119 119 { 120 120 121 #if CONFIG_DEBUG_SYSCALLS_ERROR121 #if DEBUG_SYSCALLS_ERROR 122 122 printk("\n[ERROR] in %s : file %d not writable in process %x\n", 123 123 __FUNCTION__ , file_id , process->pid ); … … 150 150 { 151 151 152 #if CONFIG_DEBUG_SYSCALLS_ERROR152 #if DEBUG_SYSCALLS_ERROR 153 153 printk("\n[ERROR] in %s cannot write data to file %d in process %x\n", 154 154 __FUNCTION__ , file_id , process->pid ); … … 163 163 hal_fence(); 164 164 165 #if CONFIG_DEBUG_SYS_WRITE165 #if DEBUG_SYS_WRITE 166 166 tm_end = hal_get_cycles(); 167 if( CONFIG_DEBUG_SYS_WRITE < tm_end )167 if( DEBUG_SYS_WRITE < tm_end ) 168 168 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n" 169 169 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n", … … 172 172 #endif 173 173 174 #if ( CONFIG_DEBUG_SYS_WRITE & 1)174 #if (DEBUG_SYS_WRITE & 1) 175 175 exit_sys_write = (uint32_t)tm_end; 176 176 -
trunk/kernel_config.h
r437 r438 36 36 //////////////////////////////////////////////////////////////////////////////////////////// 37 37 38 39 40 #define CONFIG_DEBUG_CHDEV_CMD_RX 0 41 #define CONFIG_DEBUG_CHDEV_CMD_TX 0 42 #define CONFIG_DEBUG_CHDEV_SERVER_RX 0 43 #define CONFIG_DEBUG_CHDEV_SERVER_TX 0 44 45 #define CONFIG_DEBUG_CLUSTER_INIT 0 46 #define CONFIG_DEBUG_CLUSTER_PROCESS_COPIES 0 47 48 #define CONFIG_DEBUG_DEV_TXT_RX 0 49 #define CONFIG_DEBUG_DEV_TXT_TX 0 50 #define CONFIG_DEBUG_DEV_IOC_RX 0 51 #define CONFIG_DEBUG_DEV_IOC_TX 0 52 #define CONFIG_DEBUG_DEV_NIC_RX 0 53 #define CONFIG_DEBUG_DEV_NIC_RX 0 54 #define CONFIG_DEBUG_DEV_FBF_RX 0 55 #define CONFIG_DEBUG_DEV_FBF_TX 0 56 #define CONFIG_DEBUG_DEV_DMA 0 57 #define CONFIG_DEBUG_DEV_MMC 0 58 #define CONFIG_DEBUG_DEV_PIC 0 59 60 #define CONFIG_DEBUG_DEVFS_INIT 0 61 #define CONFIG_DEBUG_DEVFS_MOVE 0 62 63 #define CONFIG_DEBUG_FATFS_INIT 0 64 #define CONFIG_DEBUG_FATFS_MOVE 0 65 #define CONFIG_DEBUG_FATFS_LOAD 0 66 67 #define CONFIG_DEBUG_GPT_ACCESS 0 68 69 #define CONFIG_DEBUG_HAL_KENTRY 0 70 #define CONFIG_DEBUG_HAL_EXCEPTIONS 0 71 #define CONFIG_DEBUG_HAL_IRQS 0 72 #define CONFIG_DEBUG_HAL_TXT_RX 0 73 #define CONFIG_DEBUG_HAL_TXT_TX 0 74 #define CONFIG_DEBUG_HAL_IOC_RX 0 75 #define CONFIG_DEBUG_HAL_IOC_TX 0 76 77 #define CONFIG_DEBUG_KCM 0 78 #define CONFIG_DEBUG_KMEM 0 79 80 #define CONFIG_DEBUG_KERNEL_INIT 0 81 #define CONFIG_DEBUG_KMEM_ALLOC 0 82 83 #define CONFIG_DEBUG_MAPPER_GET_PAGE 0 84 #define CONFIG_DEBUG_MAPPER_MOVE_USER 0 85 #define CONFIG_DEBUG_MAPPER_MOVE_KERNEL 0 86 87 #define CONFIG_DEBUG_PPM_ALLOC_PAGES 0 88 #define CONFIG_DEBUG_PPM_FREE_PAGES 0 89 90 #define CONFIG_DEBUG_PROCESS_COPY_INIT 0 91 #define CONFIG_DEBUG_PROCESS_DESTROY 0 92 #define CONFIG_DEBUG_PROCESS_INIT_CREATE 0 93 #define CONFIG_DEBUG_PROCESS_MAKE_EXEC 1 94 #define CONFIG_DEBUG_PROCESS_MAKE_FORK 1 95 #define CONFIG_DEBUG_PROCESS_REFERENCE_INIT 0 96 #define CONFIG_DEBUG_PROCESS_SIGACTION 0 97 #define CONFIG_DEBUG_PROCESS_TXT_ATTACH 0 98 #define CONFIG_DEBUG_PROCESS_ZERO_CREATE 0 99 100 #define CONFIG_DEBUG_RPC_MARSHALING 0 101 #define CONFIG_DEBUG_RPC_SEND 0 102 #define CONFIG_DEBUG_RPC_SERVER 0 103 104 #define CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 0 105 #define CONFIG_DEBUG_SCHED_YIELD 0 106 107 #define CONFIG_DEBUG_SYSCALLS_ERROR 2 108 109 #define CONFIG_DEBUG_SYS_DISPLAY 0 110 #define CONFIG_DEBUG_SYS_EXEC 1 111 #define CONFIG_DEBUG_SYS_EXIT 0 112 #define CONFIG_DEBUG_SYS_FG 0 113 #define CONFIG_DEBUG_SYS_FORK 1 114 #define CONFIG_DEBUG_SYS_GET_CONFIG 0 115 #define CONFIG_DEBUG_SYS_ISATTY 0 116 #define CONFIG_DEBUG_SYS_KILL 1 117 #define CONFIG_DEBUG_SYS_MMAP 0 118 #define CONFIG_DEBUG_SYS_READ 0 119 #define CONFIG_DEBUG_SYS_THREAD_CANCEL 0 120 #define CONFIG_DEBUG_SYS_THREAD_EXIT 0 121 #define CONFIG_DEBUG_SYS_THREAD_JOIN 0 122 #define CONFIG_DEBUG_SYS_THREAD_SLEEP 0 123 #define CONFIG_DEBUG_SYS_THREAD_WAKEUP 0 124 #define CONFIG_DEBUG_SYS_WAIT 0 125 #define CONFIG_DEBUG_SYS_WRITE 0 126 127 #define CONFIG_DEBUG_SPINLOCKS 0 128 #define CONFIG_DEBUG_REMOTE_SPINLOCKS 0 129 #define CONFIG_DEBUG_RWLOCKS 0 130 #define CONFIG_DEBUG_REMOTE_RWLOCKS 0 131 132 #define CONFIG_DEBUG_THREAD_DESTROY 0 133 #define CONFIG_DEBUG_THREAD_IDLE 0 134 #define CONFIG_DEBUG_THREAD_KERNEL_CREATE 0 135 #define CONFIG_DEBUG_THREAD_KILL 0 136 #define CONFIG_DEBUG_THREAD_USER_CREATE 0 137 #define CONFIG_DEBUG_THREAD_USER_FORK 0 138 #define CONFIG_DEBUG_THREAD_BLOCK 0 139 140 #define CONFIG_DEBUG_VFS_INODE_CREATE 0 141 #define CONFIG_DEBUG_VFS_INODE_LOAD 0 142 #define 
CONFIG_DEBUG_VFS_DENTRY_CREATE 0 143 #define CONFIG_DEBUG_VFS_OPEN 0 144 #define CONFIG_DEBUG_VFS_LOOKUP 0 145 #define CONFIG_DEBUG_VFS_ADD_CHILD 0 146 #define CONFIG_DEBUG_VFS_MAPPER_MOVE 0 147 #define CONFIG_DEBUG_VFS_MAPPER_LOAD 0 148 149 #define CONFIG_DEBUG_VMM_CREATE_VSEG 0 150 #define CONFIG_DEBUG_VMM_DESTROY 0 151 #define CONFIG_DEBUG_VMM_FORK_COPY 0 152 #define CONFIG_DEBUG_VMM_GET_ONE_PPN 0 153 #define CONFIG_DEBUG_VMM_GET_PTE 0 154 #define CONFIG_DEBUG_VMM_INIT 0 155 #define CONFIG_DEBUG_VMM_PAGE_ALLOCATE 0 156 #define CONFIG_DEBUG_VMM_SET_COW 0 157 #define CONFIG_DEBUG_VMM_UNMAP_VSEG 0 158 #define CONFIG_DEBUG_VMM_UPDATE_PTE 0 38 #define DEBUG_CHDEV_CMD_RX 0 39 #define DEBUG_CHDEV_CMD_TX 0 40 #define DEBUG_CHDEV_SERVER_RX 0 41 #define DEBUG_CHDEV_SERVER_TX 0 42 43 #define DEBUG_CLUSTER_INIT 0 44 #define DEBUG_CLUSTER_PROCESS_COPIES 0 45 46 #define DEBUG_DEV_TXT_RX 0 47 #define DEBUG_DEV_TXT_TX 0 48 #define DEBUG_DEV_IOC_RX 0 49 #define DEBUG_DEV_IOC_TX 0 50 #define DEBUG_DEV_NIC_RX 0 51 #define DEBUG_DEV_NIC_TX 0 52 #define DEBUG_DEV_FBF_RX 0 53 #define DEBUG_DEV_FBF_TX 0 54 #define DEBUG_DEV_DMA 0 55 #define DEBUG_DEV_MMC 0 56 #define DEBUG_DEV_PIC 0 57 58 #define DEBUG_DEVFS_INIT 0 59 #define DEBUG_DEVFS_MOVE 0 60 61 #define DEBUG_FATFS_INIT 0 62 #define DEBUG_FATFS_MOVE 0 63 #define DEBUG_FATFS_LOAD 0 64 65 #define DEBUG_GPT_ACCESS 0 66 67 #define DEBUG_HAL_KENTRY 0 68 #define DEBUG_HAL_EXCEPTIONS 0 69 #define DEBUG_HAL_IRQS 0 70 #define DEBUG_HAL_TXT_RX 0 71 #define DEBUG_HAL_TXT_TX 0 72 #define DEBUG_HAL_IOC_RX 0 73 #define DEBUG_HAL_IOC_TX 0 74 75 #define DEBUG_KCM 0 76 #define DEBUG_KMEM 0 77 78 #define DEBUG_KERNEL_INIT 0 79 #define DEBUG_KMEM_ALLOC 0 80 81 #define DEBUG_MAPPER_GET_PAGE 0 82 #define DEBUG_MAPPER_MOVE_USER 0 83 #define DEBUG_MAPPER_MOVE_KERNEL 0 84 85 #define DEBUG_PPM_ALLOC_PAGES 0 86 #define DEBUG_PPM_FREE_PAGES 0 87 88 #define DEBUG_PROCESS_COPY_INIT 0 89 #define DEBUG_PROCESS_DESTROY 2 90 #define DEBUG_PROCESS_INIT_CREATE 0 91 #define DEBUG_PROCESS_MAKE_EXEC 2 92 #define DEBUG_PROCESS_MAKE_FORK 2 93 #define DEBUG_PROCESS_REFERENCE_INIT 0 94 #define DEBUG_PROCESS_SIGACTION 0 95 #define DEBUG_PROCESS_TXT_ATTACH 0 96 #define DEBUG_PROCESS_ZERO_CREATE 0 97 98 #define DEBUG_RPC_CLIENT_GENERIC 0 99 #define DEBUG_RPC_SERVER_GENERIC 0 100 101 #define DEBUG_RPC_PMEM_GET_PAGES 0 102 #define DEBUG_RPC_PMEM_RELEASE_PAGES 0 103 #define DEBUG_RPC_PROCESS_MAKE_FORK 0 104 #define DEBUG_RPC_PROCESS_SIGACTION 0 105 #define DEBUG_RPC_VFS_DENTRY_CREATE 0 106 #define DEBUG_RPC_VFS_FILE_CREATE 0 107 108 #define DEBUG_SCHED_HANDLE_SIGNALS 0 109 #define DEBUG_SCHED_YIELD 0 110 111 #define DEBUG_SYSCALLS_ERROR 2 112 113 #define DEBUG_SYS_DISPLAY 0 114 #define DEBUG_SYS_EXEC 2 115 #define DEBUG_SYS_EXIT 0 116 #define DEBUG_SYS_FG 0 117 #define DEBUG_SYS_FORK 2 118 #define DEBUG_SYS_GET_CONFIG 0 119 #define DEBUG_SYS_ISATTY 0 120 #define DEBUG_SYS_KILL 2 121 #define DEBUG_SYS_MMAP 0 122 #define DEBUG_SYS_READ 0 123 #define DEBUG_SYS_THREAD_CANCEL 0 124 #define DEBUG_SYS_THREAD_EXIT 0 125 #define DEBUG_SYS_THREAD_JOIN 0 126 #define DEBUG_SYS_THREAD_SLEEP 0 127 #define DEBUG_SYS_THREAD_WAKEUP 0 128 #define DEBUG_SYS_WAIT 0 129 #define DEBUG_SYS_WRITE 0 130 131 #define DEBUG_SPINLOCKS 0 132 #define DEBUG_REMOTE_SPINLOCKS 0 133 #define DEBUG_RWLOCKS 0 134 #define DEBUG_REMOTE_RWLOCKS 0 135 136 #define DEBUG_THREAD_DESTROY 2 137 #define DEBUG_THREAD_IDLE 0 138 #define DEBUG_THREAD_KERNEL_CREATE 0 139 #define DEBUG_THREAD_KILL 0 140 #define DEBUG_THREAD_USER_CREATE 0 141 #define 
DEBUG_THREAD_USER_FORK 0 142 #define DEBUG_THREAD_BLOCK 0 143 144 #define DEBUG_VFS_INODE_CREATE 0 145 #define DEBUG_VFS_INODE_LOAD 0 146 #define DEBUG_VFS_DENTRY_CREATE 0 147 #define DEBUG_VFS_OPEN 0 148 #define DEBUG_VFS_LOOKUP 0 149 #define DEBUG_VFS_ADD_CHILD 0 150 #define DEBUG_VFS_MAPPER_MOVE 0 151 #define DEBUG_VFS_MAPPER_LOAD 0 152 153 #define DEBUG_VMM_CREATE_VSEG 0 154 #define DEBUG_VMM_DESTROY 0 155 #define DEBUG_VMM_FORK_COPY 0 156 #define DEBUG_VMM_GET_ONE_PPN 0 157 #define DEBUG_VMM_GET_PTE 0 158 #define DEBUG_VMM_INIT 0 159 #define DEBUG_VMM_PAGE_ALLOCATE 0 160 #define DEBUG_VMM_SET_COW 0 161 #define DEBUG_VMM_UNMAP_VSEG 0 162 #define DEBUG_VMM_UPDATE_PTE 0 159 163 160 164 //////////////////////////////////////////////////////////////////////////////////////////// -
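All the renamed DEBUG_* parameters above follow one convention, visible throughout the files of this changeset: the value 0 leaves the trace compiled out, any non-zero value enables it and also acts as a cycle threshold (messages are printed only once hal_get_cycles() exceeds it), and the blocks guarded by (DEBUG_X & 1) add extra-verbose details when bit 0 is set. A minimal standalone illustration of the idiom, where get_cycles() is a toy stand-in for hal_get_cycles():

#include <stdio.h>
#include <stdint.h>

#define DEBUG_FOO  5      /* trace once past cycle 5 / bit 0 set : verbose blocks enabled */

static uint32_t get_cycles( void )       /* toy stand-in for hal_get_cycles() */
{
    static uint32_t counter = 0;
    return counter += 2;
}

static void foo( void )
{
#if DEBUG_FOO
uint32_t cycle = get_cycles();
if( DEBUG_FOO < cycle )
printf( "[DBG] %s : enter / cycle %u\n" , __func__ , cycle );
#endif

    /* ... the traced operation itself ... */

#if( DEBUG_FOO & 1 )
if( DEBUG_FOO < get_cycles() )
printf( "[DBG] %s : verbose detail\n" , __func__ );
#endif
}

int main( void )
{
    foo();    /* silent : cycle counter still below the threshold */
    foo();    /* prints both messages : threshold crossed */
    return 0;
}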
trunk/user/init/init.c
r437 r438 23 23 int main() 24 24 { 25 int i; 26 int ret_fork; // fork return value 27 int ret_exec; // fork return value 28 int rcv_pid; // pid received from the wait syscall 29 int status; // used by the wait syscall 30 char string[64]; 25 int i; 26 int delay; 27 int ret_fork; // fork return value 28 int ret_exec; // exec return value 29 int rcv_pid; // pid received from the wait syscall 30 int status; // used by the wait syscall 31 char string[64]; // log messages on kernel TXT0 31 32 32 33 // check number of TXT channels … … 66 67 snprintf( string , 64 , "INIT created KSH[%d] / pid = %x", i , ret_fork ); 67 68 display_string( string ); 69 70 // INIT waits for a fixed delay between two forks 71 for( delay = 0 ; delay < 50000 ; delay++ ) asm volatile( "nop" ); 68 72 } 69 73 } 70 74 71 // display processes and threads in clusters 0 & 175 // INIT displays processes and threads in clusters 0 & 1 72 76 display_cluster_processes( 0 ); 73 77 display_sched( 0 , 0 ); … … 128 132 display_string( string ); 129 133 } 130 134 } // end KSH kill handling 131 135 } 136 137 // INIT waits for a fixed delay 138 for( delay = 0 ; delay < 50000 ; delay++ ) asm volatile( "nop" ); 139 140 // INIT displays processes and threads in clusters 0 & 1 141 display_cluster_processes( 0 ); 142 display_sched( 0 , 0 ); 143 display_cluster_processes( 1 ); 144 display_sched( 1 , 0 ); 145 146 } // end while waiting for KSH[i] termination 132 147 133 148 } // end main()
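The loop extended above is the usual init pattern: block in the wait syscall, log the terminated KSH, respawn it with fork()/exec(), and pause for a fixed delay before the next fork. A POSIX analogue of that respawn loop, bounded so the example terminates (a toy sketch; the ALMOS-MKH init uses its own syscalls and display_string() instead):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

/* toy analogue of the init respawn loop : one child process,
 * restarted (here at most 3 times) whenever it terminates */
int main( void )
{
    int respawns = 0;

    if( fork() == 0 ) { execlp( "true" , "true" , (char *)NULL ); _exit( 1 ); }

    while( respawns < 3 )
    {
        int   status;
        pid_t pid = wait( &status );       /* block until the child terminates */

        if( pid < 0 ) break;               /* no child left */

        printf( "child %d terminated / respawning\n" , (int)pid );
        if( fork() == 0 ) { execlp( "true" , "true" , (char *)NULL ); _exit( 1 ); }

        respawns++;
        usleep( 10000 );                   /* fixed delay between two forks */
    }
    wait( NULL );                          /* reap the last child */
    return 0;
}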