Changeset 438 for trunk/kernel/kern
Timestamp: Apr 4, 2018, 2:49:02 PM (7 years ago)
Location:  trunk/kernel/kern
Files:     14 edited
Legend:
  ' ' unmodified    '+' added    '-' removed
  Hunks are separated by "…" ; runs of identical mechanical edits are
  summarized in bracketed notes.
trunk/kernel/kern/chdev.c
r437 → r438

 extern chdev_directory_t chdev_dir;   // allocated in kernel_init.c

-#if ( CONFIG_DEBUG_SYS_READ & 1)
+#if (DEBUG_SYS_READ & 1)
 extern uint32_t enter_chdev_cmd_read;
 extern uint32_t exit_chdev_cmd_read;
…
-#if ( CONFIG_DEBUG_SYS_WRITE & 1)
+#if (DEBUG_SYS_WRITE & 1)
 extern uint32_t enter_chdev_cmd_write;
 extern uint32_t exit_chdev_cmd_write;
…
 uint32_t save_sr;    // for critical section

-#if ( CONFIG_DEBUG_SYS_READ & 1)
+#if (DEBUG_SYS_READ & 1)
 enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
 #endif

-#if ( CONFIG_DEBUG_SYS_WRITE & 1)
+#if (DEBUG_SYS_WRITE & 1)
 enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
 #endif
…
 chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

-#if ( CONFIG_DEBUG_CHDEV_CMD_RX || CONFIG_DEBUG_CHDEV_CMD_TX)
+#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
 bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
 #endif

-#if CONFIG_DEBUG_CHDEV_CMD_RX
+#if DEBUG_CHDEV_CMD_RX
 uint32_t rx_cycle = (uint32_t)hal_get_cycles();
-if( (is_rx) && ( CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
+if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
 printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
 __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
 #endif

-#if CONFIG_DEBUG_CHDEV_CMD_TX
+#if DEBUG_CHDEV_CMD_TX
 uint32_t tx_cycle = (uint32_t)hal_get_cycles();
-if( (is_rx == 0) && ( CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
+if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
 printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
 __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
…
 hal_restore_irq( save_sr );

 [ the same CONFIG_DEBUG_* → DEBUG_* rename is applied to the remaining
   trace blocks of this file: the DEBUG_CHDEV_CMD_RX / DEBUG_CHDEV_CMD_TX
   "exit for RX/TX" blocks, the (DEBUG_SYS_READ & 1) / (DEBUG_SYS_WRITE & 1)
   blocks around exit_chdev_cmd_read / exit_chdev_cmd_write, the
   DEBUG_CHDEV_SERVER_RX / DEBUG_CHDEV_SERVER_TX "start RX/TX" and
   "completes RX/TX" blocks in the server function, and the
   (DEBUG_SYS_READ & 1) / (DEBUG_SYS_WRITE & 1) blocks around
   enter/exit_chdev_server_read and enter/exit_chdev_server_write ]
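Every hunk in chdev.c is the same mechanical rename (CONFIG_DEBUG_* becomes
DEBUG_*); the gating idiom itself is unchanged. For readers unfamiliar with
it, a minimal sketch of that idiom follows; DEBUG_MY_DEV and my_dev_cmd()
are hypothetical stand-ins, while hal_get_cycles(), printk() and
CURRENT_THREAD are real kernel names.

    // hypothetical per-feature macro: 0 compiles the trace out entirely;
    // a non-zero value N enables the trace only once cycle N has passed,
    // which silences the noisy early boot phase
    #define DEBUG_MY_DEV  10000

    void my_dev_cmd( void )
    {
    #if DEBUG_MY_DEV
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_MY_DEV < cycle )
    printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    __FUNCTION__ , CURRENT_THREAD , cycle );
    #endif
        /* ... actual command handling ... */
    }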
trunk/kernel/kern/cluster.c
r437 → r438

 spinlock_init( &cluster->kcm_lock );

-#if CONFIG_DEBUG_CLUSTER_INIT
+#if DEBUG_CLUSTER_INIT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+if( DEBUG_CLUSTER_INIT < cycle )
 printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
…
 cluster->dqdt_root_level = dqdt_init( info->x_size,
                                       info->y_size,
-                                      info->y_width );
-cluster->threads_var = 0;
-cluster->pages_var   = 0;
+                                      info->y_width ) - 1;

 // initialises embedded PPM
…
 [ the same rename is applied to the (DEBUG_CLUSTER_INIT & 1) trace blocks
   reporting "PPM initialized", "KHM initialized", "KCM initialized",
   "cores initialized", "RPC fifo inialized", and to the final
   "thread %x exit" block ]
…
 pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

-#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
+#if DEBUG_CLUSTER_PROCESS_COPIES
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
+if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
 printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
 __FUNCTION__ , local_cxy , process , cycle );
…
 remote_spinlock_unlock_busy( copies_lock , irq_state );

 [ the matching "exit" trace block gets the same rename, in both
   process-copies functions ]
trunk/kernel/kern/cluster.h
r437 → r438

 // DQDT
-int32_t     pages_var;    /*! pages number increment from last DQQT updt   */
-int32_t     threads_var;  /*! threads number increment from last DQDT updt */
-
 dqdt_node_t dqdt_tbl[CONFIG_DQDT_LEVELS_NR];  /*! embedded DQDT nodes in cluster */
trunk/kernel/kern/core.c
r433 → r438

 // handle scheduler
 if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
-
-// update DQDT
-if( ((ticks % CONFIG_DQDT_TICKS_PER_QUANTUM) == 0) && (core->lid == 0) )
-dqdt_global_update();
 }
trunk/kernel/kern/do_syscall.c
r437 → r438

 int error = 0;

+assert( (this == CURRENT_THREAD), __FUNCTION__,
+"wrong <this> argument\n" );
+
 // update user time
 thread_user_time_update( this );
…
 // check kernel stack overflow
-assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
+assert( (CURRENT_THREAD->signature == THREAD_SIGNATURE), __FUNCTION__,
+"kernel stack overflow after for thread %x in cluster %x\n", CURRENT_THREAD, local_cxy );

 // update kernel time
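The strengthened assert catches kernel stack overflow by re-reading the
signature word of the current thread descriptor after the handler has run.
A hedged sketch of why this works, assuming (not stated in this changeset)
that the descriptor and its kernel stack share a single memory block, so an
overflowing stack corrupts the descriptor before anything else:

    // sketch only: thread_t, THREAD_SIGNATURE, assert() and local_cxy are
    // real kernel names; the layout assumption above is illustrative
    static inline void check_kernel_stack( thread_t * this )
    {
        // an overflow overwrites the descriptor, so the magic word no
        // longer matches and the kernel stops with a precise message
        assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__,
        "kernel stack overflow for thread %x in cluster %x\n", this, local_cxy );
    }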
trunk/kernel/kern/dqdt.c
r437 → r438

 #include <hal_remote.h>
 #include <printk.h>
+#include <chdev.h>
 #include <cluster.h>
 #include <bits.h>
…
-///////////////////////////////////////////
-void dqdt_local_print( dqdt_node_t * node )
-{
-    printk("DQDT node : level = %d / cluster = %x / threads = %x / pages = %x\n",
-    node->level,
-    local_cxy,
-    node->threads,
-    node->pages );
-}
-
-/////////////////////////////////////////
-void dqdt_global_print( xptr_t node_xp )
+///////////////////////////////////////////////////////////////////////////////////////////
+// Extern variables
+///////////////////////////////////////////////////////////////////////////////////////////
+
+extern chdev_directory_t chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
+
+///////////////////////////////////////////////////////////////////////////////////////////
+// This static recursive function traverse the DQDT quad-tree from root to bottom.
+///////////////////////////////////////////////////////////////////////////////////////////
+static void dqdt_recursive_print( xptr_t node_xp )
 {
     uint32_t i;
-    dqdt_node_t local_node;
-
-    // get root node local copy
-    hal_remote_memcpy( XPTR( local_cxy , &local_node ), node_xp , sizeof(dqdt_node_t) );
-
-    // display DQDT node content
-    dqdt_local_print( &local_node );
+    dqdt_node_t node;
+
+    // get node local copy
+    hal_remote_memcpy( XPTR( local_cxy , &node ), node_xp , sizeof(dqdt_node_t) );
+
+    // display node content
+    nolock_printk("- level %d in cluster %x (node %x) : threads = %x / pages = %x\n",
+    node.level, GET_CXY( node_xp ), GET_PTR( node_xp ), node.threads, node.pages );

     // recursive call on children if node is not terminal
-    if ( local_node.level > 0 )
+    if ( node.level > 0 )
     {
         for ( i = 0 ; i < 4 ; i++ )
         {
-            if ( local_node.children[i] != XPTR_NULL )
-                dqdt_global_print( local_node.children[i] );
+            if ( node.children[i] != XPTR_NULL ) dqdt_recursive_print( node.children[i] );
         }
     }
 }
+
+///////////////////
+void dqdt_display()
+{
+    reg_t save_sr;
+
+    // build extended pointer on DQDT root node
+    cluster_t * cluster = LOCAL_CLUSTER;
+    uint32_t    level   = cluster->dqdt_root_level;
+    xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );
+
+    // get pointers on TXT0 chdev
+    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on remote TXT0 chdev lock
+    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+    // get TXT0 lock in busy waiting mode
+    remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+    // print header
+    nolock_printk("\n***** DQDT state\n\n");
+
+    // call recursive function
+    dqdt_recursive_print( root_xp );
+
+    // release lock
+    remote_spinlock_unlock_busy( lock_xp , save_sr );
+}
…
 } // end dqdt_init()

 ///////////////////////////////////////////////////////////////////////////
-// This recursive function is called by the dqdt_global_update() function.
+// This recursive function is called by the dqdt_update_threads() function.
 // It traverses the quad tree from clusters to root.
 ///////////////////////////////////////////////////////////////////////////
-static void dqdt_propagate( xptr_t  node,         // extended pointer on current node
-                            int32_t threads_var,  // number of threads variation
-                            int32_t pages_var )   // number of pages variation
+// @ node      : extended pointer on current node
+// @ increment : number of threads variation
+///////////////////////////////////////////////////////////////////////////
+static void dqdt_propagate_threads( xptr_t  node,
+                                    int32_t increment )
 {
     // get current node cluster identifier and local pointer
-    cxy_t         cxy = (cxy_t)GET_CXY( node );
-    dqdt_node_t * ptr = (dqdt_node_t *)GET_PTR( node );
+    cxy_t         cxy = GET_CXY( node );
+    dqdt_node_t * ptr = GET_PTR( node );

     // update current node threads number
-    hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , threads_var );
-
-    // update current node pages number
-    hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , pages_var );
+    hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment );

     // get extended pointer on parent node
     xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );

     // propagate if required
-    if ( parent != XPTR_NULL )
-    {
-        dqdt_propagate( parent, threads_var, pages_var );
-    }
-}
-
-/////////////////////////
-void dqdt_global_update()
+    if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
+}
+
+///////////////////////////////////////////////////////////////////////////
+// This recursive function is called by the dqdt_update_pages() function.
+// It traverses the quad tree from clusters to root.
+///////////////////////////////////////////////////////////////////////////
+// @ node      : extended pointer on current node
+// @ increment : number of pages variation
+///////////////////////////////////////////////////////////////////////////
+static void dqdt_propagate_pages( xptr_t  node,
+                                  int32_t increment )
+{
+    // get current node cluster identifier and local pointer
+    cxy_t         cxy = GET_CXY( node );
+    dqdt_node_t * ptr = GET_PTR( node );
+
+    // update current node pages number
+    hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment );
+
+    // get extended pointer on parent node
+    xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
+
+    // propagate if required
+    if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment );
+}
+
+/////////////////////////////////////////////
+void dqdt_update_threads( int32_t increment )
 {
     cluster_t   * cluster = LOCAL_CLUSTER;
     dqdt_node_t * node    = &cluster->dqdt_tbl[0];

-    // get variations
-    int32_t threads_var = cluster->threads_var;
-    int32_t pages_var   = cluster->pages_var;
-
-    // propagate this variation to DQDT upper levels
-    if( (threads_var || pages_var) && (node->parent != XPTR_NULL) )
-    {
-        dqdt_propagate( node->parent, threads_var, pages_var );
-    }
-
-    // update variations
-    hal_atomic_add( &cluster->threads_var , -threads_var );
-    hal_atomic_add( &cluster->pages_var   , -pages_var );
-}
-
-///////////////////////////////////////////////////
-void dqdt_local_update_threads( int32_t increment )
-{
-    cluster_t * cluster = LOCAL_CLUSTER;
-
-    // register change for future propagation in DQDT
-    hal_atomic_add( &cluster->threads_var , increment );
-
     // update DQDT node level 0
-    hal_atomic_add( &cluster->dqdt_tbl[0].threads , increment );
-}
-
-/////////////////////////////////////////////////
-void dqdt_local_update_pages( int32_t increment )
-{
-    cluster_t * cluster = LOCAL_CLUSTER;
-
-    // register change for future propagation in DQDT
-    hal_atomic_add( &cluster->pages_var , increment );
+    hal_atomic_add( &node->threads , increment );
+
+    // propagate to DQDT upper levels
+    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
+}
+
+///////////////////////////////////////////
+void dqdt_update_pages( int32_t increment )
+{
+    cluster_t   * cluster = LOCAL_CLUSTER;
+    dqdt_node_t * node    = &cluster->dqdt_tbl[0];

     // update DQDT node level 0
-    hal_atomic_add( &cluster->dqdt_tbl[0].pages , increment );
-}
+    hal_atomic_add( &node->pages , increment );
+
+    // propagate to DQDT upper levels
+    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
+}
…
 cluster_t * cluster = LOCAL_CLUSTER;
 uint32_t    level   = cluster->dqdt_root_level;
-xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
+xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );

 // call recursive function
-return dqdt_select_cluster( root , false );
+return dqdt_select_cluster( root_xp , false );
…
 cluster_t * cluster = LOCAL_CLUSTER;
 uint32_t    level   = cluster->dqdt_root_level;
-xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
+xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );

 // call recursive function
-return dqdt_select_cluster( root , true );
+return dqdt_select_cluster( root_xp , true );
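With this refactoring the deferred threads_var / pages_var accumulators
disappear: each local variation is pushed up to the root immediately by the
two new entry points. An illustrative usage sketch; the wrapper functions
are hypothetical, only dqdt_update_threads() and dqdt_update_pages() come
from this changeset:

    // called from a (hypothetical) thread-creation path in the local
    // cluster: updates the level-0 node, then walks parent pointers up
    // to the root via hal_remote_atomic_add()
    void example_on_thread_create( void )
    {
        dqdt_update_threads( 1 );        // thread destruction would pass -1
    }

    // called from a (hypothetical) physical-memory allocator hook
    void example_on_pages_alloc( uint32_t order )
    {
        dqdt_update_pages( 1 << order ); // release would pass -(1 << order)
    }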
trunk/kernel/kern/dqdt.h
r437 → r438

 /****************************************************************************************
- * This recursive function traverses the DQDT quad-tree from bottom to root, to propagate
- * the change in the threads number and allocated pages number in a leaf cluster,
- * toward the upper levels of the DQDT quad-tree.
- * It should be called periodically by each instance of the kernel.
- ***************************************************************************************/
-void dqdt_global_update();
-
-/****************************************************************************************
- * This local function updates both the total number of threads,
- * in the level 0 DQDT node, and the variation of the number of threads
- * for future propagation to the DQDT upper levels.
+ * This local function updates the total number of threads in level 0 DQDT node,
+ * and propagates the variation to the DQDT upper levels.
  * It should be called on each thread creation or destruction.
  ****************************************************************************************
  * @ increment : increment (can be positive or negative)
  ***************************************************************************************/
-void dqdt_local_update_threads( int32_t increment );
+void dqdt_update_threads( int32_t increment );

 /****************************************************************************************
- * This local function updates both the total number of allocated pages,
- * in the level 0 DQDT node, and the variation of the number of pages
- * for future propagation to the DQDT upper levels.
- * It should be called on each memory allocation or release.
+ * This local function updates the total number of pages in level 0 DQDT node,
+ * and propagates the variation to the DQDT upper levels.
+ * It should be called on each physical memory page allocation or release.
  ****************************************************************************************
  * @ increment : increment (can be positive or negative)
  ***************************************************************************************/
-void dqdt_local_update_pages( int32_t increment );
+void dqdt_update_pages( int32_t increment );
…
 /****************************************************************************************
- * This recursive function displays usage information for all DQDT nodes in the subtree
- * defined by the node argument. It traverses the quadtree from root to bottom.
- ****************************************************************************************
- * @ node_xp : extended pointer on a DQDT node.
- ***************************************************************************************/
-void dqdt_global_print( xptr_t node_xp );
-
-/****************************************************************************************
- * This function displays summary usage information in a given DQDT local node.
- ****************************************************************************************
- * @ node : local pointer on a DQDT node.
- ***************************************************************************************/
-void dqdt_local_print( dqdt_node_t * node );
+ * This function displays on kernel TXT0 the DQDT state for all nodes in the quad-tree.
+ * It traverses the quadtree from root to bottom, and can be called by a thread
+ * running in any cluster.
+ ***************************************************************************************/
+void dqdt_display();
trunk/kernel/kern/kernel_init.c
r437 → r438

 // these debug variables are used to analyse the sys_read() syscall timing

-#if CONFIG_DEBUG_SYS_READ
+#if DEBUG_SYS_READ
 uint32_t enter_sys_read;
 uint32_t exit_sys_read;
…
 // these debug variables are used to analyse the sys_write() syscall timing

-#if CONFIG_DEBUG_SYS_WRITE
+#if DEBUG_SYS_WRITE
 uint32_t enter_sys_write;
 uint32_t exit_sys_write;
…
-#if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
-if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
+#if( DEBUG_KERNEL_INIT & 0x1 )
+if( hal_time_stamp() > DEBUG_KERNEL_INIT )
 printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n",
 __FUNCTION__ , local_cxy , chdev_ptr );

 [ the same rename is applied to the (DEBUG_KERNEL_INIT & 0x1) blocks
   reporting DMA[%d], generic chdev and PIC chdev creation ]
…
-#if CONFIG_DEBUG_KERNEL_INIT
-if( (core_lid == 0) && (local_cxy == 0) )
+#if DEBUG_KERNEL_INIT
+if( (core_lid == 0) & (local_cxy == 0) )
 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
 __FUNCTION__, (uint32_t)hal_get_cycles() );

 [ barriers 1 ("clusters initialised"), 2 ("PIC initialised"),
   3 ("all chdev initialised"), 4, 7 and 8 get the identical edit:
   macro rename plus && replaced by & in the condition ]
…
-#if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+#if( DEBUG_KERNEL_INIT & 1 )
 chdev_dir_display();
 #endif
…
 // all cores initialize the idle thread descriptor
-error = thread_kernel_init( thread,
-                            THREAD_IDLE,
-                            &thread_idle_func,
-                            NULL,
-                            core_lid );
+error = thread_idle_init( thread,
+                          THREAD_IDLE,
+                          &thread_idle_func,
+                          NULL,
+                          core_lid );
 if( error )
…
 core->scheduler.idle = thread;

-#if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+#if( DEBUG_KERNEL_INIT & 1 )
 sched_display( core_lid );
 #endif
…
-#if CONFIG_DEBUG_KERNEL_INIT
-if( (core_lid == 0) && (local_cxy == io_cxy) )
+#if DEBUG_KERNEL_INIT
+if( (core_lid == 0) & (local_cxy == 0) )
 printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster %x / cycle %d\n",
 __FUNCTION__, vfs_root_inode_xp , io_cxy , (uint32_t)hal_get_cycles());

 [ barrier 6 ("dev_root = %l in cluster %x") gets the same edit: the
   condition (local_cxy == io_cxy) also becomes (local_cxy == 0) ]
…
-#if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+#if( DEBUG_KERNEL_INIT & 1 )
 vfs_display( vfs_root_inode_xp );
 #endif
…
 print_banner( (info->x_size * info->y_size) , info->cores_nr );

-#if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+#if( DEBUG_KERNEL_INIT & 1 )
 printk("\n\n***** memory fooprint for main kernel objects\n\n"
 " - thread descriptor : %d bytes\n"
trunk/kernel/kern/process.c
r437 → r438

 model_pid = hal_remote_lw( XPTR( model_cxy , &model_ptr->pid ) );

-#if CONFIG_DEBUG_PROCESS_REFERENCE_INIT
+#if DEBUG_PROCESS_REFERENCE_INIT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
+if( DEBUG_PROCESS_REFERENCE_INIT )
 printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );

 [ the same rename is applied to the (DEBUG_PROCESS_REFERENCE_INIT & 1)
   "vmm empty", "fd_array" and "exit" traces, and to the
   DEBUG_PROCESS_COPY_INIT "enter" / "exit" traces ]
…
-#if CONFIG_DEBUG_PROCESS_DESTROY
+#if DEBUG_PROCESS_DESTROY
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PROCESS_DESTROY )
+if( DEBUG_PROCESS_DESTROY )
 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
-#endif
-
-#if CONFIG_DEBUG_PROCESS_DESTROY
-if( CONFIG_DEBUG_PROCESS_DESTROY & 1 )
-cluster_processes_display( CXY_FROM_PID( pid ) );
 #endif

 [ the matching "exit / destroyed process" trace gets the plain rename ]
…
-#if CONFIG_DEBUG_PROCESS_SIGACTION
+#if DEBUG_PROCESS_SIGACTION
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+if( DEBUG_PROCESS_SIGACTION < cycle )
 printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n",
 __FUNCTION__ , client, process_action_str( action_type ) , pid , cycle );
…
 // it can be shared because all parallel, non-blocking, server threads
 // use the same input arguments, and use the shared RPC response field
-// but use

 // the client thread makes the following sequence:
…
 // initialize shared RPC descriptor
-rpc.response = 0;
-rpc.blocking = false;
-rpc.index    = RPC_PROCESS_SIGACTION;
-rpc.thread   = client;
-rpc.lid      = client->core->lid;
-rpc.args[0]  = action_type;
-rpc.args[1]  = pid;
+rpc.responses = 0;
+rpc.blocking  = false;
+rpc.index     = RPC_PROCESS_SIGACTION;
+rpc.thread    = client;
+rpc.lid       = client->core->lid;
+rpc.args[0]   = action_type;
+rpc.args[1]   = pid;

 // send RPCs to all clusters containing process copiess
…
 // atomically increment responses counter
-hal_atomic_add( (void *)&rpc.response , 1 );
+hal_atomic_add( (void *)&rpc.responses , 1 );

 process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list );

 [ the DEBUG_PROCESS_SIGACTION rename is also applied to the "send RPC"
   trace, the client "exit after %s process" trace, and the enter/exit
   traces of the three local functions that block, unblock and delete the
   threads of a process copy ]
…
-// check parent process is the reference
+// check parent process is the reference process
 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
+
+printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
+__FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );
+
 assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
 "parent process must be the reference process\n" );

-#if CONFIG_DEBUG_PROCESS_MAKE_FORK
+#if DEBUG_PROCESS_MAKE_FORK
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
-printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, parent_pid, cycle );
+if( DEBUG_PROCESS_MAKE_FORK < cycle )
+printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle );
…
-// check main thread index
-assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
+// check main thread LTID
+assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ ,
+"main thread must have LTID == 0\n" );

 [ the (DEBUG_PROCESS_MAKE_FORK & 1) traces — "created child_process",
   "copied VMM from parent to child", "created child thread",
   "set COW in parent and child" — and the fork exit trace get the plain
   rename ]
…
-// check main thread index
-assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
+// check main thread LTID
+assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ ,
+"main thread must have LTID == 0\n" );

 [ the DEBUG_PROCESS_MAKE_EXEC traces — enter, "created new process",
   "registered code/data vsegs", "created new_process main thread",
   "old_thread %x blocked / new_thread %x activated" — and the enter/exit
   traces of process_zero_create (DEBUG_PROCESS_ZERO_CREATE) and
   process_init_create (DEBUG_PROCESS_INIT_CREATE) get the plain rename ]
…
 [ the whole TXT section gets the CONFIG_DEBUG_PROCESS_TXT_ATTACH →
   DEBUG_PROCESS_TXT_ATTACH rename: attach enter/exit ("txt_id = %d"),
   detach enter / "process %x detached from TXT" plus the (& 1) owner-list
   dump, set-ownership enter/exit, and the ownership-transfer traces
   "file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x",
   "process is not the KSH process => search the KSH",
   "process %x to KSH process %x",
   "process is the KSH process => search another",
   "KSH process %x to process %x", "KSH process %x to nobody",
   "process %x is not TXT owner" ]
trunk/kernel/kern/rpc.c
r437 r438 43 43 44 44 ///////////////////////////////////////////////////////////////////////////////////////// 45 // Debug macros for marshalling functions46 /////////////////////////////////////////////////////////////////////////////////////////47 48 #if CONFIG_DEBUG_RPC_MARSHALING49 50 #define RPC_DEBUG_ENTER \51 uint32_t cycle = (uint32_t)hal_get_cycles(); \52 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \53 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n", \54 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );55 56 #define RPC_DEBUG_EXIT \57 cycle = (uint32_t)hal_get_cycles(); \58 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \59 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n", \60 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );61 62 #else63 64 #define RPC_DEBUG_ENTER65 66 #define RPC_DEBUG_EXIT67 68 #endif69 70 /////////////////////////////////////////////////////////////////////////////////////////71 45 // array of function pointers (must be consistent with enum in rpc.h) 72 46 ///////////////////////////////////////////////////////////////////////////////////////// … … 122 96 rpc_desc_t * rpc ) 123 97 { 124 volatile error_t full = 0; 125 thread_t * this = CURRENT_THREAD; 126 core_t * core = this->core; 127 128 #if CONFIG_DEBUG_RPC_SEND 98 lid_t server_core_lid; 99 lid_t client_core_lid; 100 volatile error_t full; 101 thread_t * this; 102 cluster_t * cluster; 103 104 #if DEBUG_RPC_CLIENT_GENERIC 129 105 uint32_t cycle = (uint32_t)hal_get_cycles(); 130 if( CONFIG_DEBUG_RPC_SEND< cycle )106 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 131 107 printk("\n[DBG] %s : thread %x in cluster %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n", 132 108 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, rpc, cycle ); 133 109 #endif 134 110 135 // register client thread pointer and core lid in RPC descriptor 111 full = 0; 112 this = CURRENT_THREAD; 113 cluster = LOCAL_CLUSTER; 114 client_core_lid = this->core->lid; 115 116 // select a server_core index: 117 // use client core index if possible / core 0 otherwise 118 if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) ) 119 { 120 server_core_lid = client_core_lid; 121 } 122 else 123 { 124 server_core_lid = 0; 125 } 126 127 // register client_thread pointer and client_core lid in RPC descriptor 136 128 rpc->thread = this; 137 rpc->lid = c ore->lid;138 139 // build anextended pointer on the RPC descriptor129 rpc->lid = client_core_lid; 130 131 // build extended pointer on the RPC descriptor 140 132 xptr_t desc_xp = XPTR( local_cxy , rpc ); 141 133 … … 160 152 hal_fence(); 161 153 162 // send IPI to the remote core corresponding to the clientcore163 dev_pic_send_ipi( server_cxy , core->lid );154 // send IPI to the selected server core 155 dev_pic_send_ipi( server_cxy , server_core_lid ); 164 156 165 157 // wait RPC completion before returning if blocking RPC … … 171 163 { 172 164 173 #if CONFIG_DEBUG_RPC_SEND174 cycle = (uint32_t)hal_get_cycles(); 175 if( CONFIG_DEBUG_RPC_SEND< cycle )165 #if DEBUG_RPC_CLIENT_GENERIC 166 cycle = (uint32_t)hal_get_cycles(); 167 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 176 168 printk("\n[DBG] %s : thread %x in cluster %x busy waiting / rpc[%d] / cycle %d\n", 177 169 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle ); 178 170 #endif 179 171 180 while( rpc->response ) hal_fixed_delay( 100 );172 while( rpc->responses ) hal_fixed_delay( 100 ); 181 173 182 #if CONFIG_DEBUG_RPC_SEND183 cycle = 
(uint32_t)hal_get_cycles(); 184 if( CONFIG_DEBUG_RPC_SEND< cycle )185 printk("\n[DBG] %s : thread % in cluster %x resume/ rpc[%d] / cycle %d\n",174 #if DEBUG_RPC_CLIENT_GENERIC 175 cycle = (uint32_t)hal_get_cycles(); 176 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 177 printk("\n[DBG] %s : thread %x in cluster %x resumes / rpc[%d] / cycle %d\n", 186 178 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle ); 187 179 #endif … … 190 182 { 191 183 192 #if CONFIG_DEBUG_RPC_SEND193 cycle = (uint32_t)hal_get_cycles(); 194 if( CONFIG_DEBUG_RPC_SEND< cycle )195 printk("\n[DBG] %s : thread %x in cluster %x deschedule/ rpc[%d] / cycle %d\n",184 #if DEBUG_RPC_CLIENT_GENERIC 185 cycle = (uint32_t)hal_get_cycles(); 186 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 187 printk("\n[DBG] %s : thread %x in cluster %x blocks & deschedules / rpc[%d] / cycle %d\n", 196 188 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle ); 197 189 #endif … … 199 191 sched_yield("blocked on RPC"); 200 192 201 #if CONFIG_DEBUG_RPC_SEND202 cycle = (uint32_t)hal_get_cycles(); 203 if( CONFIG_DEBUG_RPC_SEND< cycle )204 printk("\n[DBG] %s : thread % in cluster %x resume/ rpcr[%d] / cycle %d\n",193 #if DEBUG_RPC_CLIENT_GENERIC 194 cycle = (uint32_t)hal_get_cycles(); 195 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 196 printk("\n[DBG] %s : thread %x in cluster %x resumes / rpcr[%d] / cycle %d\n", 205 197 __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle ); 206 198 #endif … … 208 200 209 201 // check response available 210 assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" ); 211 212 // acknowledge the IPI sent by the server 213 dev_pic_ack_ipi(); 202 assert( (rpc->responses == 0) , __FUNCTION__, "illegal RPC response\n" ); 214 203 } 215 else 204 else // non blocking RPC 216 205 { 217 206 218 #if CONFIG_DEBUG_RPC_SEND219 cycle = (uint32_t)hal_get_cycles(); 220 if( CONFIG_DEBUG_RPC_SEND< cycle )207 #if DEBUG_RPC_CLIENT_GENERIC 208 cycle = (uint32_t)hal_get_cycles(); 209 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 221 210 printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return / cycle %d\n", 222 211 __FUNCTION__, rpc->index, CURRENT_THREAD, cycle ); … … 244 233 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 245 234 246 #if CONFIG_DEBUG_RPC_SERVER235 #if DEBUG_RPC_SERVER_GENERIC 247 236 uint32_t cycle = (uint32_t)hal_get_cycles(); 248 if( CONFIG_DEBUG_RPC_SERVER< cycle )237 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 249 238 printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n", 250 239 __FUNCTION__, this, local_cxy, cycle ); … … 254 243 hal_disable_irq( &sr_save ); 255 244 256 // check RPC FIFO not empty and no RPC thread handling it245 // activate (or create) RPC thread if RPC FIFO not empty 257 246 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 258 247 { 259 // search one non blocked RPC thread 248 249 #if DEBUG_RPC_SERVER_GENERIC 250 cycle = (uint32_t)hal_get_cycles(); 251 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 252 printk("\n[DBG] %s : RPC FIFO non empty in cluster %x / cycle %d\n", 253 __FUNCTION__, local_cxy, cycle ); 254 #endif 255 256 // search one IDLE RPC thread 260 257 list_entry_t * iter; 261 258 LIST_FOREACH( &sched->k_root , iter ) 262 259 { 263 260 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 264 if( (thread->type == THREAD_RPC) && (thread->blocked == 0) )261 if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) ) 265 262 { 263 // unblock found RPC thread 264 thread_unblock( XPTR( local_cxy , thread ) , 
…
     // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests

-    while(1)  // external loop
+    while(1)  // infinite loop
     {
         // try to take RPC_FIFO ownership
…
         {

-#if CONFIG_DEBUG_RPC_SERVER
+#if DEBUG_RPC_SERVER_GENERIC
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
     printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
     __FUNCTION__, this, local_cxy, cycle );
…
             // initializes RPC requests counter
             count = 0;
-
-            // acknowledge local IPI
-            dev_pic_ack_ipi();

             // exit internal loop in three cases:
…
                 blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );

-#if CONFIG_DEBUG_RPC_SERVER
-    cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
-    printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
-    __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
+#if DEBUG_RPC_SERVER_GENERIC
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
+    printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
+    __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
 #endif
                 // call the relevant server function
                 rpc_server[index]( desc_xp );

-#if CONFIG_DEBUG_RPC_SERVER
-    cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
+#if DEBUG_RPC_SERVER_GENERIC
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
     printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
-    __FUNCTION__, this, local_cxy, index, cycle );
+    __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
 #endif
                 // increment handled RPCs counter
…
                 {
                     // decrement responses counter in RPC descriptor
-                    hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);
+                    hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
+
+                    // get client thread pointer and client core lid from RPC descriptor
+                    thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
+                    core_lid   = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );

                     // unblock client thread
-                    thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
-                    thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );
+                    thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );

                     hal_fence();

-                    // get client core lid and send IPI
-                    core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
+#if DEBUG_RPC_SERVER_GENERIC
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
+    printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
+    __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
+#endif
+                    // send IPI to client core
                     dev_pic_send_ipi( desc_cxy , core_lid );
                 }
…
         {

-#if CONFIG_DEBUG_RPC_SERVER
+#if DEBUG_RPC_SERVER_GENERIC
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
     printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
     __FUNCTION__, this, local_cxy, cycle );
…
         }

-#if CONFIG_DEBUG_RPC_SERVER
+#if DEBUG_RPC_SERVER_GENERIC
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
     printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
     __FUNCTION__, this, local_cxy, cycle );
 #endif

-        // deschedule without blocking
+        // Block and deschedule
+        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
         sched_yield("RPC fifo empty or too much work");

-#if CONFIG_DEBUG_RPC_SERVER
-    cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_RPC_SERVER < cycle )
+#if DEBUG_RPC_SERVER_GENERIC
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_SERVER_GENERIC < cycle )
     printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
     __FUNCTION__, this, local_cxy, cycle );
 #endif

-    } // end external loop
+    } // end infinite loop

 } // end rpc_thread_func()
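Putting the client side (rpc_send above) and the server side together, the completion handshake for a blocking RPC now reads as below; this is a condensed sketch with error handling and debug code stripped, not a verbatim extract:

    // client (rpc_send) : post the request, then wait
    rpc->blocking  = true;
    rpc->responses = 1;
    dev_pic_send_ipi( server_cxy , server_core_lid );
    // ... thread blocks on THREAD_BLOCKED_RPC and deschedules ...
    // on wake-up all responses must have arrived : rpc->responses == 0

    // server (rpc_thread_func) : execute, then signal completion
    rpc_server[index]( desc_xp );                                          // marshalled service
    hal_remote_atomic_add( XPTR( desc_cxy , &desc_ptr->responses ) , -1 ); // one response done
    thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );  // wake the client
    dev_pic_send_ipi( desc_cxy , core_lid );                               // kick the client core

Note that both dev_pic_ack_ipi() calls of r437 are removed by this changeset; IPI acknowledgement is presumably handled elsewhere, in the PIC driver path.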
…
                               page_t  ** page )      // out
 {
-
-    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
-
-    // initialise RPC descriptor header
-    rpc_desc_t  rpc;
-    rpc.index    = RPC_PMEM_GET_PAGES;
-    rpc.response = 1;
-    rpc.blocking = true;
+#if DEBUG_RPC_PMEM_GET_PAGES
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif
+
+    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
+
+    // initialise RPC descriptor header
+    rpc_desc_t  rpc;
+    rpc.index     = RPC_PMEM_GET_PAGES;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     *page = (page_t *)(intptr_t)rpc.args[1];

+#if DEBUG_RPC_PMEM_GET_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
 void rpc_pmem_get_pages_server( xptr_t xp )
 {
+#if DEBUG_RPC_PMEM_GET_PAGES
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif

     // get client cluster identifier and pointer on RPC descriptor
…
     hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );

+#if DEBUG_RPC_PMEM_GET_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
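As a usage illustration of the instrumented stub above: a thread that needs a physical page in a possibly remote cluster typically tests locality first and falls back to the RPC. The snippet below is hypothetical caller code, not part of this changeset, and it assumes the usual interpretation of the order input as ln2 of the number of requested pages:

    page_t * page = NULL;

    if( target_cxy == local_cxy )     // local case : call the local allocator directly
    {
        /* ... local physical memory allocation ... */
    }
    else                              // remote case : go through the blocking RPC
    {
        rpc_pmem_get_pages_client( target_cxy , 0 , &page );   // one page (order 0)
    }

    if( page == NULL ) printk("\n[ERROR] %s : cannot get page\n", __FUNCTION__ );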
…
                                 page_t * page )      // out
 {
+#if DEBUG_RPC_PMEM_RELEASE_PAGES
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif

     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_PMEM_RELEASE_PAGES;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     rpc_send( cxy , &rpc );

+#if DEBUG_RPC_PMEM_RELEASE_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
 void rpc_pmem_release_pages_server( xptr_t xp )
 {
+#if DEBUG_RPC_PMEM_RELEASE_PAGES
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif

     // get client cluster identifier and pointer on RPC descriptor
…
     kmem_free( &req );

+#if DEBUG_RPC_PMEM_RELEASE_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
                                   error_t   * error )              // out
 {
+#if DEBUG_RPC_PROCESS_MAKE_FORK
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif
+
     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_PROCESS_MAKE_FORK;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     *error   = (error_t)rpc.args[4];

+#if DEBUG_RPC_PROCESS_MAKE_FORK
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
 void rpc_process_make_fork_server( xptr_t xp )
 {
+#if DEBUG_RPC_PROCESS_MAKE_FORK
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif

     xptr_t     ref_process_xp;     // extended pointer on reference parent process
…
     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );

+#if DEBUG_RPC_PROCESS_MAKE_FORK
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
     rpc_desc_t  rpc;
     rpc.index    = RPC_THREAD_USER_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // get client cluster identifier and pointer on RPC descriptor
     cxy_t        client_cxy = GET_CXY( xp );
-    rpc_desc_t * desc = GET_PTR( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );

     // get pointer on attributes structure in client cluster from RPC descriptor
…
     rpc_desc_t  rpc;
     rpc.index    = RPC_THREAD_KERNEL_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // get client cluster identifier and pointer on RPC descriptor
     cxy_t        client_cxy = GET_CXY( xp );
-    rpc_desc_t * desc = GET_PTR( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );

     // get attributes from RPC descriptor
…
 {

-#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
+#if DEBUG_RPC_PROCESS_SIGACTION
     uint32_t  cycle  = (uint32_t)hal_get_cycles();
     uint32_t  action = rpc->args[0];
     pid_t     pid    = rpc->args[1];
-    if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+    if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
     printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
     __FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
…
     rpc_send( cxy , rpc );

-#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
-    cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+#if DEBUG_RPC_PROCESS_SIGACTION
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
     printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n",
     __FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
…
     pid    = (pid_t)    hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );

-#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
+#if DEBUG_RPC_PROCESS_SIGACTION
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+    if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
     printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
     __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
…
     // build extended pointer on response counter in RPC
-    count_xp = XPTR( client_cxy , &rpc->response );
+    count_xp = XPTR( client_cxy , &rpc->responses );

     // decrement the responses counter in RPC descriptor,
…
     }

-#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
-    cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+#if DEBUG_RPC_PROCESS_SIGACTION
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
     printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
     __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_INODE_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_INODE_DESTROY;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
                                   error_t  * error )       // out
 {
-    RPC_DEBUG_ENTER
+#if DEBUG_RPC_VFS_DENTRY_CREATE
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif

     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_DENTRY_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     *error = (error_t)rpc.args[4];

-    RPC_DEBUG_EXIT
+#if DEBUG_RPC_VFS_DENTRY_CREATE
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
 void rpc_vfs_dentry_create_server( xptr_t xp )
 {
+#if DEBUG_RPC_VFS_DENTRY_CREATE
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif
+
     uint32_t      type;
     char        * name;
…
     xptr_t        dentry_xp;
     error_t       error;
-
-    RPC_DEBUG_ENTER
-
     char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
…
     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );

-    RPC_DEBUG_EXIT
+#if DEBUG_RPC_VFS_DENTRY_CREATE
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
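The renamed responses field is what makes the sigaction multicast above work: the client can share one descriptor among several target clusters, initialize responses to the number of targets, and let each server decrement it atomically, so the client is unblocked only by the last response. A hedged sketch of such a caller, with the service index, the client stub name and the loop all assumed for illustration (the actual multicast code lives in process_sigaction(), outside this changeset):

    rpc_desc_t  rpc;                          // one descriptor shared by all targets

    rpc.index     = RPC_PROCESS_SIGACTION;    // service index (name assumed)
    rpc.blocking  = false;                    // no per-target wait in rpc_send()
    rpc.responses = nr_targets;               // one expected response per cluster

    rpc.args[0] = (uint64_t)action;
    rpc.args[1] = (uint64_t)pid;

    for( i = 0 ; i < nr_targets ; i++ )       // hypothetical loop over target clusters
        rpc_process_sigaction_client( cxy_tab[i] , &rpc );

    // block once, until the last server decrements rpc.responses to zero
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_RPC );
    sched_yield( "blocked on multicast RPC" );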
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_DENTRY_DESTROY;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
                                 error_t     * error )      // out
 {
+#if DEBUG_RPC_VFS_FILE_CREATE
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif
+
     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_FILE_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     *error = (error_t)rpc.args[3];

+#if DEBUG_RPC_VFS_FILE_CREATE
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
 void rpc_vfs_file_create_server( xptr_t xp )
 {
+#if DEBUG_RPC_VFS_FILE_CREATE
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
+    printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
+#endif
+
     uint32_t      file_attr;
     vfs_inode_t * inode;
…
     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );

+#if DEBUG_RPC_VFS_FILE_CREATE
+    cycle = (uint32_t)hal_get_cycles();
+    if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
+    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+    __FUNCTION__ , CURRENT_THREAD , cycle );
+#endif
 }
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_FILE_DESTROY;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_INODE_LOAD;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VFS_MAPPER_LOAD_ALL;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_FATFS_GET_CLUSTER;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // get client cluster identifier and pointer on RPC descriptor
     cxy_t        client_cxy = GET_CXY( xp );
-    rpc_desc_t * desc = GET_PTR( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );

     // get input arguments
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VMM_GET_VSEG;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VMM_GET_PTE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_THREAD_USER_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // get client cluster identifier and pointer on RPC descriptor
     cxy_t        client_cxy = GET_CXY( xp );
-    rpc_desc_t * desc = GET_PTR( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );

     // get input argument "kmem_type" from client RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_THREAD_USER_CREATE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // get client cluster identifier and pointer on RPC descriptor
     cxy_t        client_cxy = GET_CXY( xp );
-    rpc_desc_t * desc = GET_PTR( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );

     // get input arguments "buf" and "kmem_type" from client RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_MAPPER_MOVE_BUFFER;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_MAPPER_GET_PAGE;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VMM_CREATE_VSEG;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_SCHED_DISPLAY;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VMM_SET_COW;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
     rpc.index    = RPC_VMM_DISPLAY;
-    rpc.response = 1;
-    rpc.blocking = true;
+    rpc.blocking  = true;
+    rpc.responses = 1;

     // set input arguments in RPC descriptor
…
trunk/kernel/kern/rpc.h
r437 r438

 {
     rpc_index_t         index;      /*! index of requested RPC service           */
-    volatile uint32_t   response;   /*! all responses received when 0            */
+    volatile uint32_t   responses;  /*! number of expected responses             */
     struct thread_s   * thread;     /*! local pointer on client thread           */
     uint32_t            lid;        /*! index of core running the calling thread */
…
 /***********************************************************************************
- * This function is the entry point for RPC handling on the server side.
- * It is executed by a core receiving an IPI, and each time the core enters,
- * or exit the kernel to handle.
- * It does nothing and return if the RPC_FIFO is empty.
- * The calling thread checks if it exist at least one non-blocked RPC thread,
- * creates a new RPC if required, and deschedule to allow the RPC thead to execute.
+ * This function is the entry point for RPC handling on the server cluster.
+ * It is executed by the core receiving the IPI sent by the client thread.
+ * - If the RPC FIFO is empty, it deschedules.
+ * - If the RPC FIFO is not empty, it checks if it exist a non-blocked RPC thread
+ *   in the cluster, creates a new one if required, and deschedule to allow
+ *   the RPC thead to execute.
 **********************************************************************************/
 void rpc_check();
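For a single-target blocking request, the descriptor fields above are filled exactly as in the rpc.c stubs of this changeset; the thread and lid fields can be left to rpc_send(), which registers the client thread and selects the server core itself. A minimal sketch:

    rpc_desc_t  rpc;

    rpc.index     = RPC_PMEM_GET_PAGES;   // requested service, indexes rpc_server[]
    rpc.blocking  = true;                 // caller waits for completion
    rpc.responses = 1;                    // single server => single expected response

    rpc.args[0] = (uint64_t)order;        // marshalled input argument(s)

    rpc_send( remote_cxy , &rpc );        // fills rpc.thread / rpc.lid, posts, waits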
trunk/kernel/kern/scheduler.c
r437 r438

         thread = LIST_ELEMENT( current , thread_t , sched_list );

-        // analyse kernel thread type
-        switch( thread->type )
+        // execute RPC thread if non blocked
+        if( (thread->blocked == 0) &&
+            (thread->type == THREAD_RPC) )
         {
-            case THREAD_RPC:  // if non blocked and RPC FIFO non-empty
-            if( (thread->blocked == 0) &&
-                (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
-            {
-                spinlock_unlock( &sched->lock );
-                return thread;
-            }
-            break;
-
-            case THREAD_DEV:  // if non blocked and waiting queue non empty
-            if( (thread->blocked == 0) &&
-                (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
-            {
-                spinlock_unlock( &sched->lock );
-                return thread;
-            }
-            break;
-
-            default:
-            break;
+            spinlock_unlock( &sched->lock );
+            return thread;
+        }
+
+        // execute DEV thread if non blocked and waiting queue non empty
+        if( (thread->blocked == 0) &&
+            (thread->type == THREAD_DEV) &&
+            (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
+        {
+            spinlock_unlock( &sched->lock );
+            return thread;
         }
     } // end loop on kernel threads
…
         thread = LIST_ELEMENT( current , thread_t , sched_list );

-        // return thread if runnable
+        // return thread if non blocked
         if( thread->blocked == 0 )
         {
…
     process = thread->process;

-#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
+#if DEBUG_SCHED_HANDLE_SIGNALS
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
+    if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n",
     __FUNCTION__ , thread , process->pid , cycle );
…
     thread_destroy( thread );

-#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
+#if DEBUG_SCHED_HANDLE_SIGNALS
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
+    if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
     __FUNCTION__ , thread , process->pid , cycle );
…
     process_destroy( process );

-#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
+#if DEBUG_SCHED_HANDLE_SIGNALS
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
+    if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
     __FUNCTION__ , process->pid , cycle );
…
     scheduler_t * sched = &core->scheduler;

-#if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
-    if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
+#if (DEBUG_SCHED_YIELD & 0x1)
+    if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
     sched_display( core->lid );
 #endif
…
     {

-#if CONFIG_DEBUG_SCHED_YIELD
+#if DEBUG_SCHED_YIELD
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_SCHED_YIELD < cycle )
+    if( DEBUG_SCHED_YIELD < cycle )
     printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
     "      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
…
     {

-#if (CONFIG_DEBUG_SCHED_YIELD & 1)
+#if (DEBUG_SCHED_YIELD & 1)
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_SCHED_YIELD < cycle )
+    if( DEBUG_SCHED_YIELD < cycle )
     printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
     "      thread %x (%s) (%x,%x) continue / cycle %d\n",
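With RPC threads now parking themselves under THREAD_BLOCKED_IDLE, the scheduler no longer needs to peek at the RPC FIFO: an unblocked RPC thread has work by construction, which is why the switch on thread->type collapses into two plain tests. The selection rule can be restated as a single predicate; this is a condensed restatement of the new code above, not an addition to it:

    static inline bool_t kernel_thread_eligible( thread_t * t )
    {
        if( t->blocked != 0 )        return false;   // any blocking cause disqualifies

        if( t->type == THREAD_RPC )  return true;    // unblocked RPC thread has work

        if( (t->type == THREAD_DEV) &&               // DEV thread with pending commands
            (xlist_is_empty( XPTR( local_cxy , &t->chdev->wait_root ) ) == 0) )
                                     return true;

        return false;
    }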
trunk/kernel/kern/thread.c
r436 r438

 /////////////////////////////////////////////////////////////////////////////////////
 // This static function initializes a thread descriptor (kernel or user).
-// It can be called by the three functions:
+// It can be called by the four functions:
 // - thread_user_create()
 // - thread_user_fork()
 // - thread_kernel_create()
+// - thread_idle_init()
+// It updates the local DQDT.
 /////////////////////////////////////////////////////////////////////////////////////
 // @ thread       : pointer on thread descriptor
…
     thread->save_sr = 0xFF13;

-    // update local DQDT
-    dqdt_local_update_threads( 1 );
-
     // register new thread in core scheduler
     sched_register_thread( thread->core , thread );
+
+    // update DQDT
+    dqdt_update_threads( 1 );

     return 0;
…
     assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );

-#if CONFIG_DEBUG_THREAD_USER_CREATE
+#if DEBUG_THREAD_USER_CREATE
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
+    if( DEBUG_THREAD_USER_CREATE < cycle )
     printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, pid , cycle );
…
     }

-    // update DQDT for new thread
-    dqdt_local_update_threads( 1 );
-
-#if CONFIG_DEBUG_THREAD_USER_CREATE
+#if DEBUG_THREAD_USER_CREATE
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
+    if( DEBUG_THREAD_USER_CREATE < cycle )
     printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
…
     vseg_t       * vseg;             // child thread STACK vseg

-#if CONFIG_DEBUG_THREAD_USER_FORK
+#if DEBUG_THREAD_USER_FORK
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
+    if( DEBUG_THREAD_USER_FORK < cycle )
     printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
…
     hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );

-#if (CONFIG_DEBUG_THREAD_USER_FORK & 1)
+#if (DEBUG_THREAD_USER_FORK & 1)
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
+    if( DEBUG_THREAD_USER_FORK < cycle )
     printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
     __FUNCTION__, CURRENT_THREAD, vpn );
…
     vpn_size );

-    // update DQDT for child thread
-    dqdt_local_update_threads( 1 );
-
-#if CONFIG_DEBUG_THREAD_USER_FORK
+#if DEBUG_THREAD_USER_FORK
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
+    if( DEBUG_THREAD_USER_FORK < cycle )
     printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
…
     __FUNCTION__ , "illegal core_lid" );

-#if CONFIG_DEBUG_THREAD_KERNEL_CREATE
+#if DEBUG_THREAD_KERNEL_CREATE
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
+    if( DEBUG_THREAD_KERNEL_CREATE < cycle )
     printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
…
     hal_cpu_context_create( thread );

-    // update DQDT for kernel thread
-    dqdt_local_update_threads( 1 );
-
-#if CONFIG_DEBUG_THREAD_KERNEL_CREATE
+#if DEBUG_THREAD_KERNEL_CREATE
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
+    if( DEBUG_THREAD_KERNEL_CREATE < cycle )
     printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
…
 } // end thread_kernel_create()

-///////////////////////////////////////////////////
-error_t thread_kernel_init( thread_t      * thread,
-                            thread_type_t   type,
-                            void          * func,
-                            void          * args,
-                            lid_t           core_lid )
+/////////////////////////////////////////////////
+error_t thread_idle_init( thread_t      * thread,
+                          thread_type_t   type,
+                          void          * func,
+                          void          * args,
+                          lid_t           core_lid )
 {
     assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
…
     return error;

-} // end thread_kernel_init()
+} // end thread_idle_init()

 ///////////////////////////////////////////////////////////////////////////////////////
…
     core_t   * core    = thread->core;

-#if CONFIG_DEBUG_THREAD_DESTROY
+#if DEBUG_THREAD_DESTROY
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
+    if( DEBUG_THREAD_DESTROY < cycle )
     printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
…
     process_remove_thread( thread );

-    // update local DQDT
-    dqdt_local_update_threads( -1 );
+    // update DQDT
+    dqdt_update_threads( -1 );

     // invalidate thread descriptor
…
     thread_release( thread );

-#if CONFIG_DEBUG_THREAD_DESTROY
+#if DEBUG_THREAD_DESTROY
     cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
+    if( DEBUG_THREAD_DESTROY < cycle )
     printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
     __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
…
     hal_fence();

-#if CONFIG_DEBUG_THREAD_BLOCK
+#if DEBUG_THREAD_BLOCK
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
+    if( DEBUG_THREAD_BLOCK < cycle )
     printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
     __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
 #endif

-#if (CONFIG_DEBUG_THREAD_BLOCK & 1)
-    if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
+#if (DEBUG_THREAD_BLOCK & 1)
+    if( DEBUG_THREAD_BLOCK < cycle )
     sched_display( ptr->core->lid );
 #endif
…
     hal_fence();

-#if CONFIG_DEBUG_THREAD_BLOCK
+#if DEBUG_THREAD_BLOCK
     uint32_t cycle = (uint32_t)hal_get_cycles();
-    if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
+    if( DEBUG_THREAD_BLOCK < cycle )
     printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
     __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
 #endif

-#if (CONFIG_DEBUG_THREAD_BLOCK & 1)
-    if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
+#if (DEBUG_THREAD_BLOCK & 1)
+    if( DEBUG_THREAD_BLOCK < cycle )
     sched_display( ptr->core->lid );
 #endif
…
     killer_xp  = XPTR( local_cxy , killer_ptr );

-#if CONFIG_DEBUG_THREAD_KILL
+#if DEBUG_THREAD_KILL
     uint32_t cycle  = (uint32_t)hal_get_cycles;
-    if( CONFIG_DEBUG_THREAD_KILL < cycle )
+    if( DEBUG_THREAD_KILL < cycle )
     printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
     __FUNCTION__, killer_ptr, target_ptr, cycle );
…
     else hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );

-#if CONFIG_DEBUG_THREAD_KILL
+#if DEBUG_THREAD_KILL
     cycle  = (uint32_t)hal_get_cycles;
-    if( CONFIG_DEBUG_THREAD_KILL < cycle )
+    if( DEBUG_THREAD_KILL < cycle )
     printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
     __FUNCTION__, killer_ptr, target_ptr, cycle );
…
     hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );

-#if CONFIG_DEBUG_THREAD_KILL
+#if DEBUG_THREAD_KILL
     cycle  = (uint32_t)hal_get_cycles;
-    if( CONFIG_DEBUG_THREAD_KILL < cycle )
+    if( DEBUG_THREAD_KILL < cycle )
     printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
     __FUNCTION__, killer_ptr, target_ptr, cycle );
…
 {

-#if CONFIG_DEBUG_THREAD_IDLE
+#if DEBUG_THREAD_IDLE
     uint32_t cycle = (uint32_t)hal_get_cycles;
     thread_t * this = CURRENT_THREAD;
-    if( CONFIG_DEBUG_THREAD_IDLE < cycle )
+    if( DEBUG_THREAD_IDLE < cycle )
     printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
     __FUNCTION__, this, local_cxy, this->core->lid, cycle );
…
     hal_core_sleep();

-#if CONFIG_DEBUG_THREAD_IDLE
+#if DEBUG_THREAD_IDLE
     cycle = (uint32_t)hal_get_cycles;
-    if( CONFIG_DEBUG_THREAD_IDLE < cycle )
+    if( DEBUG_THREAD_IDLE < cycle )
     printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
     __FUNCTION__, this, local_cxy, this->core->lid, cycle );
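The DQDT bookkeeping is also centralized by this changeset: the dqdt_local_update_threads( 1 ) calls previously scattered across thread_user_create(), thread_user_fork() and thread_kernel_create() are replaced by a single dqdt_update_threads( 1 ) in thread_init(), matched by the decrement in thread_destroy(). The resulting invariant, condensed from the hunks above:

    // thread_init()    : one increment, whatever the creation path
    sched_register_thread( thread->core , thread );
    dqdt_update_threads( 1 );

    // thread_destroy() : the matching decrement
    process_remove_thread( thread );
    dqdt_update_threads( -1 );

Every thread that reaches a scheduler is therefore counted exactly once, including the idle threads initialized through the renamed thread_idle_init().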
trunk/kernel/kern/thread.h
r437 r438

 #define THREAD_BLOCKED_SEM       0x0020  /*! thread wait semaphore                 */
 #define THREAD_BLOCKED_PAGE      0x0040  /*! thread wait page access               */
+#define THREAD_BLOCKED_IDLE      0x0080  /*! thread RPC wait RPC_FIFO non empty    */
 #define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait (cond/mutex/barrier)      */
 #define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion            */
…
 /***************************************************************************************
- * This function initializes an existing thread descriptor from arguments values.
+ * This function is called by the kernel_init() function to initialize the IDLE thread.
+ * It initializes an existing thread descriptor from arguments values.
  * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
- * It is called by the kernel_init() function to initialize the IDLE thread.
 ***************************************************************************************
  * @ thread   : pointer on existing thread descriptor.
…
  * @ returns 0 if success / returns EINVAL if error
 **************************************************************************************/
-error_t thread_kernel_init( thread_t      * thread,
-                            thread_type_t   type,
-                            void          * func,
-                            void          * args,
-                            lid_t           core_lid );
+error_t thread_idle_init( thread_t      * thread,
+                          thread_type_t   type,
+                          void          * func,
+                          void          * args,
+                          lid_t           core_lid );

 /***************************************************************************************
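For reference, a hedged sketch of how kernel_init() would use the renamed initializer; kernel_init.c is not part of this changeset, so the idle function name and the explicit unblocking step below are illustrative assumptions consistent with the comment above:

    thread_t  idle;                               // per-core idle thread descriptor
    error_t   error;

    error = thread_idle_init( &idle,
                              THREAD_IDLE,        // mandatory : asserted by the function
                              &thread_idle_func,  // hypothetical idle loop entry point
                              NULL,               // no argument
                              core_lid );

    if( error ) printk("\n[PANIC] %s : cannot init idle thread on core %d\n",
                       __FUNCTION__ , core_lid );

    // thread_idle_init() leaves the THREAD_BLOCKED_GLOBAL bit set :
    // the thread must be explicitly unblocked to start
    thread_unblock( XPTR( local_cxy , &idle ) , THREAD_BLOCKED_GLOBAL );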