- Timestamp: Jun 29, 2018, 10:44:14 AM
- Location: trunk
- Files: 15 edited
Legend:
- unmodified lines are shown without a prefix
- added lines are prefixed with +
- removed lines are prefixed with -
trunk/kernel/kern/chdev.c
(r447 → r450)

      uint32_t rx_cycle = (uint32_t)hal_get_cycles();
      if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
-     printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
-     __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
+     printk("\n[DBG] %s : client_thread %x (%s) enter for RX / server = %x / cycle %d\n",
+     __FUNCTION__, this, thread_type_str(this->type) , server_ptr, rx_cycle );
      #endif
…
      uint32_t tx_cycle = (uint32_t)hal_get_cycles();
      if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
-     printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
-     __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
+     printk("\n[DBG] %s : client_thread %x (%s) enter for TX / server = %x / cycle %d\n",
+     __FUNCTION__, this, thread_type_str(this->type) , server_ptr, tx_cycle );
      #endif
…
      lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

      // critical section for the following sequence:
      // (1) take the lock protecting waiting queue
      // (2) block the client thread
…
      thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

+     #if (DEBUG_CHDEV_CMD_TX & 1)
+     if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
+     printk("\n[DBG] in %s : client thread %x blocked\n", __FUNCTION__, this );
+     #endif
+
      // unblock server thread if required
      if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
      thread_unblock( server_xp , THREAD_BLOCKED_IDLE );

+     #if (DEBUG_CHDEV_CMD_TX & 1)
+     if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
+     {
+     printk("\n[DBG] in %s : server thread %x unblocked\n", __FUNCTION__, server_ptr );
+     chdev_queue_display( chdev_xp );
+     }
+     #endif
+
      // register client thread in waiting queue
      xlist_add_last( root_xp , list_xp );

+     #if (DEBUG_CHDEV_CMD_TX & 1)
+     if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
+     {
+     printk("\n[DBG] in %s : thread %x registered write request in chdev\n", __FUNCTION__, this );
+     chdev_queue_display( chdev_xp );
+     }
+     #endif
+
      // send IPI to core running the server thread when server != client
      different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
-     if( different ) dev_pic_send_ipi( chdev_cxy , lid );
+     if( different )
+     {
+     dev_pic_send_ipi( chdev_cxy , lid );
+
+     #if (DEBUG_CHDEV_CMD_TX & 1)
+     if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
+     printk("\n[DBG] in %s : client thread %x sent IPI to server thread %x\n",
+     __FUNCTION__, this, server_ptr );
+     #endif
+
+     }
+
      // release lock
      remote_spinlock_unlock( lock_xp );
…
      // get extended pointer on root of requests queue
-     root_xp = hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->wait_root ));
+     root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );

      // get chdev name
…
      pid = hal_remote_lw ( XPTR( thread_cxy , &process->pid ) );

-     printk("- trdid %X / pid %X\n", trdid, pid );
+     printk("- thread %X / cluster %X / trdid %X / pid %X\n",
+     thread_ptr, thread_cxy, trdid, pid );
      }
      }
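All of the trace points added above follow the same gating idiom: a DEBUG_* constant is compared against the current cycle count, and the extra, more verbose messages are additionally compiled in only when bit 0 of that constant is set, as in the (DEBUG_CHDEV_CMD_TX & 1) guards. The stand-alone sketch below mimics that idiom outside the kernel; the DEBUG_CMD macro, the fake cycle counter and demo_command() are illustrative stand-ins, not ALMOS-MKH code.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for a kernel DEBUG_* constant: the value acts as a
     * cycle threshold, and bit 0 selects an extra "verbose" level, mirroring the
     * (DEBUG_CHDEV_CMD_TX & 1) guards added by this changeset. */
    #define DEBUG_CMD  1                 /* 0 = off / odd value = verbose sub-level */

    static uint32_t fake_cycles = 100;   /* stand-in for hal_get_cycles()           */

    static void demo_command( int is_rx )
    {
        uint32_t tx_cycle = ++fake_cycles;

        /* first-level trace: printed once the cycle threshold is exceeded */
        if( (is_rx == 0) && DEBUG_CMD && (DEBUG_CMD < tx_cycle) )
            printf("[DBG] enter for TX / cycle %u\n", tx_cycle);

    #if (DEBUG_CMD & 1)
        /* second-level trace: only compiled in when bit 0 of the level is set */
        if( (is_rx == 0) && (DEBUG_CMD < tx_cycle) )
            printf("[DBG] client thread blocked / request registered\n");
    #endif
    }

    int main( void )
    {
        demo_command( 0 );   /* TX command : both trace levels fire */
        demo_command( 1 );   /* RX command : neither TX trace fires */
        return 0;
    }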
trunk/kernel/kern/chdev.h
(r447 → r450)

      * This structure is replicated in each cluster, and is initialised during kernel init.
      * It is used for fast access to a device descriptor, from type and channel for an
-     * external peripheral, or from type and cluster for a hared internal peripheral.
+     * external peripheral, or from type and cluster for an internal peripheral.
      * - a "shared" chdev can be accessed by any thread running in any cluster.
      * - a "private" chdev can only be accessed by a thread running in local cluster.
trunk/kernel/kern/process.c
(r446 → r450)

      cluster_process_copies_unlink( process );

-     // remove process from children_list if process owner cluster
+     // remove process from children_list
+     // and release PID if owner cluster
      if( CXY_FROM_PID( pid ) == local_cxy )
      {
…
      hal_remote_atomic_add( children_nr_xp , -1 );
      remote_spinlock_unlock( children_lock_xp );
-     }
-
-     // release the process PID to cluster manager if process owner cluster
-     if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );
+
+     // release the process PID to cluster manager
+     cluster_pid_release( pid );
+
+     }

      // FIXME close all open files and update dirty [AG]
…
      // remove thread from th_tbl[]
      process->th_tbl[ltid] = NULL;
-     process->th_nr --;
+     process->th_nr = count-1;

      // release lock protecting th_tbl
…
      return (count == 1);

-     }  // process_remove_thread()
+     }  // end process_remove_thread()

      /////////////////////////////////////////////////////////
trunk/kernel/kern/process.h
(r446 → r450)

      /*********************************************************************************************
-     * This function attach a process descriptor in owner cluster, identified by the <process>
-     * argument to a TXT terminal, identified by its <txt_id> channel index argument.
+     * This function attach a process, identified by the <process> argument to a TXT terminal,
+     * identified by the <txt_id> channel index argument.
+     * The process descriptor identified by the <process> argument must be in the owner cluster.
      * It insert the process descriptor in the xlist rooted in the TXT_RX device.
      * It is called by the process_reference_init() function.
trunk/kernel/kern/rpc.c
(r441 → r450)

      &rpc_mapper_get_page_server,        // 25
      &rpc_vmm_create_vseg_server,        // 26
-     &rpc_sched_display_server,          // 27
+     &rpc_undefined,                     // 27    unused slot
      &rpc_vmm_set_cow_server,            // 28
      &rpc_vmm_display_server,            // 29
…
      /////////////////////////////////////////////////////////////////////////////////////////
-     // [27] Marshaling functions attached to RPC_SCHED_DISPLAY (blocking)
-     /////////////////////////////////////////////////////////////////////////////////////////
-
-     ////////////////////////////////////////////////////////
-     void rpc_sched_display_client( cxy_t cxy,
-                                    lid_t lid)
-     {
-         assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
-
-         // initialise RPC descriptor header
-         rpc_desc_t rpc;
-         rpc.index     = RPC_SCHED_DISPLAY;
-         rpc.blocking  = true;
-         rpc.responses = 1;
-
-         // set input arguments in RPC descriptor
-         rpc.args[0] = (uint64_t)lid;
-
-         // register RPC request in remote RPC fifo
-         rpc_send( cxy , &rpc );
-     }
-
-     //////////////////////////////////////////
-     void rpc_sched_display_server( xptr_t xp )
-     {
-         // get client cluster identifier and pointer on RPC descriptor
-         cxy_t        cxy  = GET_CXY( xp );
-         rpc_desc_t * desc = GET_PTR( xp );
-
-         // get input arguments from client RPC descriptor
-         lid_t lid = (lid_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
-
-         // call local kernel function
-         sched_display( lid );
-     }
+     // [27] undefined slot
+     /////////////////////////////////////////////////////////////////////////////////////////

      /////////////////////////////////////////////////////////////////////////////////////////
trunk/kernel/kern/rpc.h
(r438 → r450)

      RPC_MAPPER_GET_PAGE   = 25,
      RPC_VMM_CREATE_VSEG   = 26,
-     RPC_SCHED_DISPLAY     = 27,
+     RPC_UNDEFINED_27      = 27,
      RPC_VMM_SET_COW       = 28,
      RPC_VMM_DISPLAY       = 29,
…
      /***********************************************************************************
-     * [27] The RPC_SCHED_DISPLAY allows a client thread to request the display
-     * of a remote scheduler, identified by the <lid> argument.
-     ***********************************************************************************
-     * @ cxy : server cluster identifier.
-     * @ lid : [in] local index of target core in client cluster.
-     **********************************************************************************/
-     void rpc_sched_display_client( cxy_t cxy,
-                                    lid_t lid );
-
-     void rpc_sched_display_server( xptr_t xp );
+     * [27] undefined slot
+     **********************************************************************************/

      /***********************************************************************************
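Retiring RPC index 27 on both sides keeps the enum and the server dispatch table aligned: the numeric slot still exists, but it now routes to a placeholder, so the indices of the later services (28, 29, ...) do not shift. The fragment below is a stand-alone model of that table layout; the local enum, the stub handlers and main() are invented for the example and only mirror the pattern visible in rpc.c and rpc.h.

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal model of an RPC dispatch table in which a retired service keeps
     * its numeric slot but is routed to a placeholder handler, as slot 27 is
     * after this changeset. All names here are local to the example. */

    typedef void (*rpc_handler_t)( uint32_t arg );

    enum { RPC_VMM_CREATE_VSEG = 26, RPC_UNDEFINED_27 = 27, RPC_VMM_SET_COW = 28 };

    static void rpc_vmm_create_vseg_server( uint32_t arg ) { printf("create_vseg(%u)\n", arg); }
    static void rpc_vmm_set_cow_server    ( uint32_t arg ) { printf("set_cow(%u)\n", arg); }
    static void rpc_undefined             ( uint32_t arg ) { (void)arg; printf("unused RPC slot called\n"); }

    static rpc_handler_t rpc_server[] =
    {
        [RPC_VMM_CREATE_VSEG] = rpc_vmm_create_vseg_server,   /* 26             */
        [RPC_UNDEFINED_27]    = rpc_undefined,                /* 27 unused slot */
        [RPC_VMM_SET_COW]     = rpc_vmm_set_cow_server,       /* 28             */
    };

    int main( void )
    {
        /* a request carrying the stale index 27 lands on the placeholder
           instead of shifting or dereferencing a dangling pointer */
        rpc_server[RPC_UNDEFINED_27]( 0 );
        rpc_server[RPC_VMM_SET_COW]( 42 );
        return 0;
    }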
trunk/kernel/kern/scheduler.c
(r445 → r450)

      list_entry_t * root;
      bool_t         done;
+     uint32_t       count;

      // take lock protecting sheduler lists
…
      root    = &sched->k_root;
      last    = sched->k_last;
+     done    = false;
+     count   = 0;
      current = last;
-     done    = false;

      while( done == false )
      {
+     assert( (count < sched->k_threads_nr), __FUNCTION__, "bad kernel threads list" );
+
      // get next entry in kernel list
      current = current->next;
…
      // skip the root that does not contain a thread
      if( current == root ) continue;
+     else count++;

      // get thread pointer for this entry
      thread = LIST_ELEMENT( current , thread_t , sched_list );

-     // select kernel thread if non blocked and non IDLE
+     // select kernel thread if non blocked and non THREAD_IDLE
      if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) )
      {
…
      }
      } // end loop on kernel threads
-     } // end if kernel threads
+     } // end kernel threads

      // second : scan the user threads list if not empty
…
      root    = &sched->u_root;
      last    = sched->u_last;
+     done    = false;
+     count   = 0;
      current = last;
-     done    = false;

      while( done == false )
      {
+     assert( (count < sched->u_threads_nr), __FUNCTION__, "bad user threads list" );
+
      // get next entry in user list
      current = current->next;
…
      // skip the root that does not contain a thread
      if( current == root ) continue;
+     else count++;

      // get thread pointer for this entry
      thread = LIST_ELEMENT( current , thread_t , sched_list );

-     // return thread if non blocked
+     // select thread if non blocked
      if( thread->blocked == 0 )
      {
…
      }
      } // end loop on user threads
-     } // end if user threads
+     } // end user threads

      // third : return idle thread if no other runnable thread
…
      sched->u_threads_nr = threads_nr - 1;
      list_unlink( &thread->sched_list );
-     if( threads_nr == 1 ) sched->u_last = NULL;
-
-     // delete thread
+     if( sched->u_last == &thread->sched_list )
+     {
+         if( threads_nr == 1 )
+         {
+             sched->u_last = NULL;
+         }
+         else if( sched->u_root.next == &thread->sched_list )
+         {
+             sched->u_last = sched->u_root.pred;
+         }
+         else
+         {
+             sched->u_last = sched->u_root.next;
+         }
+     }
+
+     // delete thread descriptor
      last_thread = thread_destroy( thread );
…
      __FUNCTION__ , process->pid , local_cxy , cycle );
      #endif
-
      }
      }
…
      } // end sched_display()

+     /////////////////////////////////////
+     void sched_remote_display( cxy_t cxy,
+                                lid_t lid )
+     {
+         thread_t * thread;
+         uint32_t   save_sr;
+
+         // check cxy
+         bool_t undefined = cluster_is_undefined( cxy );
+         assert( (undefined == false), __FUNCTION__, "illegal cluster %x\n", cxy );
+
+         // check lid
+         uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
+         assert( (lid < cores), __FUNCTION__, "illegal core index %d\n", lid);
+
+         // get local pointer on target scheduler
+         core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
+         scheduler_t * sched = &core->scheduler;
+
+         // get local pointer on current thread in target scheduler
+         thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );
+
+         // get local pointer on the first kernel and user threads list_entry
+         list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
+         list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );
+
+         // get pointers on TXT0 chdev
+         xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+         cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+         chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+         // get extended pointer on remote TXT0 chdev lock
+         xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+         // get TXT0 lock in busy waiting mode
+         remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+         // display header
+         nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
+         cxy , lid, current, (uint32_t)hal_get_cycles() );
+
+         // display kernel threads
+         while( k_entry != &sched->k_root )
+         {
+             // get local pointer on kernel_thread
+             thread = LIST_ELEMENT( k_entry , thread_t , sched_list );
+
+             // get relevant thead info
+             thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
+             trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
+             uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
+             uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+             process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
+             pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+
+             // display thread info
+             if (type == THREAD_DEV)
+             {
+                 char name[16];
+                 chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
+                 hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , &chdev->name ) );
+
+                 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
+                 thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
+             }
+             else
+             {
+                 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
+                 thread_type_str( type ), pid, trdid, thread, blocked, flags );
+             }
+
+             // get next remote kernel thread list_entry
+             k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
+         }
+
+         // display user threads
+         while( u_entry != &sched->u_root )
+         {
+             // get local pointer on user_thread
+             thread = LIST_ELEMENT( u_entry , thread_t , sched_list );
+
+             // get relevant thead info
+             thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
+             trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
+             uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
+             uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+             process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
+             pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+
+             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
+             thread_type_str( type ), pid, trdid, thread, blocked, flags );
+
+             // get next user thread list_entry
+             u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
+         }
+
+         // release TXT0 lock
+         remote_spinlock_unlock_busy( lock_xp , save_sr );
+
+     } // end sched_remote_display()
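The new block in sched_handle_signals() repairs sched->u_last when the thread being removed is exactly the entry that u_last designates; without it the next round-robin scan would start from a freed list entry. The stand-alone program below reproduces the three repair cases on a minimal circular list with a root sentinel; the entry type, the helper names and the repair-before-unlink ordering are choices made for this self-contained example, not the kernel code itself.

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal model of a double circular linked list with a root sentinel and a
     * "last scanned" pointer, showing the repair applied when the entry that
     * "last" designates is unlinked. Names and types are example-only. */

    typedef struct entry_s { struct entry_s * next; struct entry_s * pred; } entry_t;

    static void list_init( entry_t * root ) { root->next = root; root->pred = root; }

    static void list_add_last( entry_t * root , entry_t * e )
    {
        e->pred = root->pred;   e->next = root;
        root->pred->next = e;   root->pred = e;
    }

    static void list_unlink( entry_t * e )
    {
        e->pred->next = e->next;   e->next->pred = e->pred;
    }

    /* same three cases as the new code in sched_handle_signals() */
    static entry_t * repair_last( entry_t * root , entry_t * last ,
                                  entry_t * victim , unsigned nr )
    {
        if( last != victim )         return last;         /* last still valid    */
        if( nr == 1 )                return NULL;         /* list becomes empty  */
        if( root->next == victim )   return root->pred;   /* victim was first    */
        return root->next;                                /* generic case        */
    }

    int main( void )
    {
        entry_t   root, a, b, c;
        entry_t * last = &b;                 /* previous scan stopped on b */

        list_init( &root );
        list_add_last( &root , &a );
        list_add_last( &root , &b );
        list_add_last( &root , &c );

        last = repair_last( &root , last , &b , 3 );
        list_unlink( &b );

        printf( "after removing b, last designates %s\n",
                (last == &a) ? "a" : (last == &c) ? "c" :
                (last == NULL) ? "nothing" : "?" );
        return 0;
    }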
trunk/kernel/kern/scheduler.h
(r443 → r450)

      /*********************************************************************************************
-     * This function display the internal state of the local core identified by its <lid>.
+     * This debug function displays on TXT0 the internal state of a local scheduler,
+     * identified by the core local index <lid>.
      *********************************************************************************************
      * @ lid : local index of target core.
…
      void sched_display( lid_t lid );

+     /*********************************************************************************************
+     * This debug function displays on TXT0 the internal state of a scheduler,
+     * identified by the target cluster identifier <cxy> and the core local index <lid>.
+     * It can be called by a thread running in any cluster, as it uses remote accesses,
+     * to scan the scheduler local lists of threads.
+     *********************************************************************************************
+     * @ cxy : target cluster identifier
+     * @ lid : local index of target core.
+     ********************************************************************************************/
+     void sched_remote_display( cxy_t cxy,
+                                lid_t lid );

      #endif /* _SCHEDULER_H_ */
trunk/kernel/kern/thread.c
(r446 → r450)

      if( DEBUG_THREAD_DESTROY < cycle )
      printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
-     __FUNCTION__, CURRENT_THREAD, thread , process->pid, cycle );
+     __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, cycle );
      #endif
…
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_DESTROY < cycle )
-     printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
-     __FUNCTION__, CURRENT_THREAD, thread , process->pid, cycle );
+     printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / last %d / cycle %d\n",
+     __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, last_thread / cycle );
      #endif
…
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_BLOCK < cycle )
-     printk("\n[@@@] %s : thread %x in cxy %x blocked thread %x in cxy %x / cause %x / cycle %d\n",
+     printk("\n[DBG] %s : thread %x in cxy %x blocked thread %x in cxy %x / cause %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , ptr , cxy , cause , cycle );
-     #endif
-
-     #if (DEBUG_THREAD_BLOCK & 1)
-     if( DEBUG_THREAD_BLOCK < cycle )
-     {
-     if( cxy == local_cxy)
-     {
-     sched_display( ptr->core->lid );
-     }
-     else
-     {
-     core_t * core = hal_remote_lpt( XPTR( cxy , &ptr->core ) );
-     lid_t    lid  = hal_remote_lw ( XPTR( cxy , &core->lid ) );
-     rpc_sched_display_client( cxy , lid );
-     }
-     }
      #endif
…
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_BLOCK < cycle )
-     printk("\n[@@@] %s : thread %x in cxy %x unblocked thread %x in cxy %x / cause %x / cycle %d\n",
+     printk("\n[DBG] %s : thread %x in cxy %x unblocked thread %x in cxy %x / cause %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , ptr , cxy , cause , cycle );
-     #endif
-
-     #if (DEBUG_THREAD_BLOCK & 1)
-     if( DEBUG_THREAD_BLOCK < cycle )
-     {
-     if( cxy == local_cxy)
-     {
-     sched_display( ptr->core->lid );
-     }
-     else
-     {
-     core_t * core = hal_remote_lpt( XPTR( cxy , &ptr->core ) );
-     lid_t    lid  = hal_remote_lw ( XPTR( cxy , &core->lid ) );
-     rpc_sched_display_client( cxy , lid );
-     }
-     }
      #endif
trunk/kernel/kernel_config.h
(r447 → r450)

      #define DEBUG_PROCESS_REFERENCE_INIT      0
      #define DEBUG_PROCESS_SIGACTION           0
-     #define DEBUG_PROCESS_TXT                 2
+     #define DEBUG_PROCESS_TXT                 0
      #define DEBUG_PROCESS_ZERO_CREATE         0
…
      #define DEBUG_RPC_VMM_GET_VSEG            0

-     #define DEBUG_SCHED_HANDLE_SIGNALS        1
-     #define DEBUG_SCHED_YIELD                 1   // must be activated by the trace() syscall
+     #define DEBUG_SCHED_HANDLE_SIGNALS        2
+     #define DEBUG_SCHED_YIELD                 2   // must be activated by the trace() syscall

      #define DEBUG_SYSCALLS_ERROR              2
…
      #define DEBUG_SYS_EXIT                    1
      #define DEBUG_SYS_FG                      0
-     #define DEBUG_SYS_FORK                    1
+     #define DEBUG_SYS_FORK                    0
      #define DEBUG_SYS_GET_CONFIG              0
      #define DEBUG_SYS_ISATTY                  0
      #define DEBUG_SYS_KILL                    0
      #define DEBUG_SYS_MMAP                    0
-     #define DEBUG_SYS_READ                    2
+     #define DEBUG_SYS_READ                    0
      #define DEBUG_SYS_THREAD_CANCEL           0
      #define DEBUG_SYS_THREAD_CREATE           0
…
      #define DEBUG_SYS_TRACE                   0
      #define DEBUG_SYS_WAIT                    0
-     #define DEBUG_SYS_WRITE                   2
+     #define DEBUG_SYS_WRITE                   0

      #define DEBUG_SPINLOCKS                   0
trunk/kernel/libk/list.h
(r440 → r450)

      /***************************************************************************
      * This structure defines a Double Circular Linked List entry.
-     * Note : The list root is an extra list-entry_t, that is NOT part of the
-     *        set of linked elements.
+     * Note : The list root is an extra list-entry_t, that is NOT part
+     *        of the set of linked elements.
      **************************************************************************/
trunk/kernel/syscalls/sys_display.c
(r445 → r450)

      else
      {
-     rpc_sched_display_client( cxy , lid );
+     sched_remote_display( cxy , lid );
      }
      }
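With this change the display syscall calls the kernel function directly instead of posting a blocking RPC to the target cluster. The stand-alone fragment below models the resulting local/remote dispatch; the local branch, the cluster constant and the stub bodies are assumptions made so the example compiles on its own, and only the sched_remote_display( cxy , lid ) call shape comes from the diff.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t cxy_t;   /* cluster identifier (modelled) */
    typedef uint32_t lid_t;   /* core local index   (modelled) */

    static const cxy_t local_cxy = 0x0;    /* cluster running the syscall */

    /* stubs standing in for the kernel functions declared in scheduler.h */
    static void sched_display       ( lid_t lid )             { printf("local display, core %u\n", lid); }
    static void sched_remote_display( cxy_t cxy , lid_t lid ) { printf("remote display, core[%x,%u]\n", cxy, lid); }

    /* modelled dispatch, as the display-scheduler syscall presumably does after this change */
    static void display_sched( cxy_t cxy , lid_t lid )
    {
        if( cxy == local_cxy ) sched_display( lid );               /* same cluster              */
        else                   sched_remote_display( cxy , lid );  /* remote accesses, no RPC   */
    }

    int main( void )
    {
        display_sched( 0x0 , 1 );   /* local cluster  */
        display_sched( 0x2 , 0 );   /* remote cluster */
        return 0;
    }

Since the remote variant only performs read accesses on the target scheduler, as the scheduler.h comment above states, no server-side RPC thread needs to be woken on a purely diagnostic path.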
trunk/kernel/syscalls/sys_read.c
(r446 → r450)

      }

-     printk("\n###### in %s : thread %x in process %x got TXT_RX ownership\n",
-     __FUNCTION__, this->trdid, process->pid );
-
      // move count bytes from device
      nbytes = devfs_user_move( true,    // from device to buffer
trunk/libs/libalmosmkh/almosmkh.h
(r445 → r450)

      ****************************************************************************************/
      int trace( unsigned int active,
-                unsigned int pid,
-                unsigned int trdid );
+                unsigned int cxy,
+                unsigned int lid );

      /****************************************************************************************
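Seen from user code, trace() now selects a core by cluster identifier and local index rather than by a pid/trdid pair. A hedged usage sketch follows, in the style of the user programs of this repository; the literal (cxy, lid) values and the assumption that trace() returns 0 on success are illustrative, not verified against the library.

    #include <stdio.h>
    #include <almosmkh.h>   /* user-level ALMOS-MKH API : trace() */

    int main( void )
    {
        // activate the scheduler trace on core[0,0]
        // (cluster 0 / core 0 chosen arbitrarily for this example)
        if( trace( 1 , 0 , 0 ) )
            printf("\n[TRACE] activation failed\n");

        // ... run the code to be observed ...

        // deactivate the trace on the same core
        trace( 0 , 0 , 0 );

        return 0;
    }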
trunk/user/idbg/idbg.c
(r446 → r450)

      get_cycle( &cycle );
+
      printf( "\n[IDBG] starts at cycle %d\n", (unsigned int)cycle );

-     ///////
-     idbg();
-     ///////
+     // idbg();

-     get_cycle( &cycle );
-     printf( "\n[IDBG] exit at cycle %d\n", (unsigned int)cycle );
-
-     exit( EXIT_SUCCESS );
+     exit( 0 );

      } // end main()