Changeset 637 for trunk/kernel
- Timestamp: Jul 18, 2019, 2:06:55 PM
- Location: trunk/kernel
- Files: 52 edited
trunk/kernel/Makefile
r633 → r637

      SYS_OBJS_4 = build/syscalls/sys_get_config.o    \
-                  build/syscalls/sys_get_core.o      \
+                  build/syscalls/sys_get_core_id.o   \
                   build/syscalls/sys_get_cycle.o     \
                   build/syscalls/sys_display.o       \
  …
      SYS_OBJS_5 = build/syscalls/sys_exit.o          \
                   build/syscalls/sys_sync.o          \
-                  build/syscalls/sys_fsync.o
+                  build/syscalls/sys_fsync.o         \
+                  build/syscalls/sys_get_best_core.o \
+                  build/syscalls/sys_get_nb_cores.o

      VFS_OBJS = build/fs/vfs.o \
trunk/kernel/devices/dev_dma.c
r619 → r637

   * dev_dma.c - DMA (Interrupt Controler Unit) generic device API implementation.
   *
-  * Authors   Alain Greiner    (2016,2017,2018)
+  * Authors   Alain Greiner    (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …
      error_t error;

+     lid_t lid = cluster_select_local_core( local_cxy );
+
      error = thread_kernel_create( &new_thread,
                                    THREAD_DEV,
                                    &chdev_server_func,
                                    dma,
-                                   cluster_select_local_core() );
+                                   lid );
      if( error )
      {
trunk/kernel/devices/dev_ioc.c
r626 → r637

      // select a core to execute the IOC server thread
-     lid_t lid = cluster_select_local_core( );
+     lid_t lid = cluster_select_local_core( local_cxy );

      // bind the IOC IRQ to the selected core
trunk/kernel/devices/dev_nic.c
r619 → r637

   * dev_nic.c - NIC (Network Controler) generic device API implementation.
   *
-  * Author    Alain Greiner    (2016,2017,2018)
+  * Author    Alain Greiner    (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …
      // select a core to execute the NIC server thread
-     lid_t lid = cluster_select_local_core( );
+     lid_t lid = cluster_select_local_core( local_cxy );

      // bind the NIC IRQ to the selected core
trunk/kernel/devices/dev_txt.c
r626 → r637

  {
      // select a core to execute the server thread
-     lid_t lid = cluster_select_local_core( );
+     lid_t lid = cluster_select_local_core( local_cxy );

      // The unique IRQ from cluster 00's MTTY must be bound to a RX chdev
  …
          thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
      }
- }
+ }  // end dev_txt_init()

  //////////////////////////////////////////////////////////////////////////////////
  …
      // return I/O operation status from calling thread descriptor
      return this->txt_cmd.error;
- }
+
+ }  // end dev_txt_access()

  /////////////////////////////////////////
  …
                          uint32_t count )
  {
+     error_t error;

  #if (DEBUG_SYS_WRITE & 1)
  …
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_DEV_TXT_TX < cycle )
-     printk("\n[%s] thread[%x,%x] enters / cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, cycle );
- #endif
-
-     // get extended pointer on TXT[0] chdev
+     printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, buffer, cycle );
+ #endif
+
+     // If we use MTTY (vci_multi_tty), we do a synchronous write on TXT[0]
+     // If we use TTY (vci_tty_tsar), we do a standard asynchronous write
+     // TODO this is not very clean ... [AG]
+
+     // get pointers on chdev
      xptr_t dev_xp = chdev_dir.txt_tx[0];
-
-     assert( (dev_xp != XPTR_NULL) , __FUNCTION__ ,
-     "undefined TXT0 chdev descriptor" );
-
-     // get TXTO chdev cluster and local pointer
-     cxy_t     dev_cxy = GET_CXY( dev_xp );
-     chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
-
-     // If we use MTTYs (vci_multi_tty), we perform only sync writes
-     // Otherwise, we use vci_tty_tsar so we can use async writes
+     cxy_t     dev_cxy = GET_CXY( dev_xp );
+     chdev_t * dev_ptr = GET_PTR( dev_xp );

      if( dev_ptr->impl == IMPL_TXT_MTY )
  …
          args.channel = channel;

-         // call driver function
+         // call directly the driver function
          aux( &args );

-         return 0;
-     }
-
+         error = 0;
+     }
      else
      {
-         return dev_txt_access( TXT_WRITE , channel , buffer , count );
+         // register command in chdev queue for an asynchronous access
+         error = dev_txt_access( TXT_WRITE , channel , buffer , count );
+
+         if( error )
+         {
+             printk("\n[ERROR] in %s : cannot write string %s / cycle %d\n",
+             __FUNCTION__, buffer, (uint32_t)hal_get_cycles() );
+         }
      }
  …
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_DEV_TXT_TX < cycle )
-     printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+     printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
      __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif
  …
  #endif

- }
+     return error;
+
+ }  // end dev_txt_write()

  /////////////////////////////////////////
  …
                         char * buffer )
  {
+     error_t error;

  #if (DEBUG_SYS_READ & 1)
  …
  #endif

-     return dev_txt_access( TXT_READ , channel , buffer , 1 );
+     // register command in chdev queue for an asynchronous access
+     error = dev_txt_access( TXT_READ , channel , buffer , 1 );
+
+     if( error )
+     {
+         printk("\n[ERROR] in %s : cannot get character / cycle %d\n",
+         __FUNCTION__, (uint32_t)hal_get_cycles() );
+     }

  #if DEBUG_DEV_TXT_RX
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_DEV_TXT_RX < cycle )
-     printk("\n[%s] thread[%x,%x] exit/ cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, cycle );
+     printk("\n[%s] thread[%x,%x] get character <%c> / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, *buffer, cycle );
  #endif
  …
  #endif

- }
+     return error;
+
+ }  // end dev_txt_read()

  ////////////////////////////////////////////////
trunk/kernel/devices/dev_txt.h
r626 → r637

   * device and the driver specific data structures when required.
   * It creates the associated server thread and allocates a WTI from local ICU.
-  * It must de executed by a local thread.
+  * It must be executed by a thread running in cluster containing the chdev descriptor.
   ******************************************************************************************
   * @ chdev     : local pointer on TXT device descriptor.
  …
   * by the "channel" argument. The corresponding request is actually registered in the
   * chdev requests queue, and the calling thread is descheduled, blocked until
-  * transfer completion.
-  * It must be called in the client cluster.
+  * transfer completion. It can be called by any thread running in any cluster.
   ******************************************************************************************
   * @ channel   : TXT channel index.
  …
   * by the "channel" argument. The corresponding request is actually registered in the
   * chdev requests queue, and the calling thread is descheduled, blocked until
-  * transfer completion.
-  * It must be called in the client cluster.
+  * transfer completion. It can be called by any thread running in any cluster.
   ******************************************************************************************
   * @ channel   : TXT channel index.
  …
   * interfering with another possible TXT access to another terminal.
   * As it is used for debug, the command arguments <buffer> and <count> are registerd
-  * in a specific "txt_syc_args_t" structure passed to the driver "aux" function.
+  * in a specific "txt_sync_args_t" structure passed to the driver "aux" function.
   ****************************************************************************************
   * @ buffer    : local pointer on source buffer containing the string.
trunk/kernel/fs/devfs.c
r635 → r637

      // move burst bytes from k_buf to u_buf
-     hal_strcpy_to_uspace( u_buf , k_buf , burst );
+     hal_strcpy_to_uspace( u_buf,
+                           XPTR( local_cxy , k_buf ),
+                           burst );

      // update loop variables
  …
      // move burst bytes from u_buf to k_buf
-     hal_strcpy_from_uspace( k_buf , u_buf , burst );
+     hal_strcpy_from_uspace( XPTR( local_cxy , k_buf ) , u_buf , burst );

      // write burst bytes from kernel buffer to TXT device
trunk/kernel/kern/cluster.c
r635 → r637

      // initialize the cluster_info[][] array
-     for (x = 0; x < CONFIG_MAX_CLUSTERS_X; x++)
-     {
-         for (y = 0; y < CONFIG_MAX_CLUSTERS_Y;y++)
+     for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ )
+     {
+         for( y = 0; y < CONFIG_MAX_CLUSTERS_Y ; y++ )
          {
              cluster->cluster_info[x][y] = info->cluster_info[x][y];
  …
      }

-     // initialize number of cores
+     // initialize number of local cores
      cluster->cores_nr = info->cores_nr;

  }  // end cluster_info_init()
+
+ //////////////////////////////////////
+ void cluster_info_display( cxy_t cxy )
+ {
+     uint32_t x;
+     uint32_t y;
+     uint32_t ncores;
+
+     cluster_t * cluster = LOCAL_CLUSTER;
+
+     // get x_size & y_size from target cluster
+     uint32_t x_size = hal_remote_l32( XPTR( cxy , &cluster->x_size ) );
+     uint32_t y_size = hal_remote_l32( XPTR( cxy , &cluster->y_size ) );
+
+     // get pointers on TXT0 chdev
+     xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+     // get extended pointer on remote TXT0 lock
+     xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+     // get TXT0 lock
+     remote_busylock_acquire( lock_xp );
+
+     nolock_printk("\n***** cluster_info in cluster %x / x_size %d / y_size %d\n",
+     cxy, x_size, y_size );
+
+     for( x = 0 ; x < x_size ; x++ )
+     {
+         for( y = 0 ; y < y_size ; y++ )
+         {
+             ncores = (uint32_t)hal_remote_lb( XPTR( cxy , &cluster->cluster_info[x][y] ) );
+             nolock_printk(" - ncores[%d][%d] = %d\n", x, y, ncores );
+         }
+     }
+
+     // release TXT0 lock
+     remote_busylock_release( lock_xp );
+
+ }  // end cluster_info_display()

  /////////////////////////////////////////////////////
  …
      printk("\n[%s] thread[%x,%x] enters for cluster %x / cycle %d\n",
      __FUNCTION__, this->process->pid, this->trdid, local_cxy , cycle );
+ #endif
+
+ #if (DEBUG_CLUSTER_INIT & 1)
+     cluster_info_display( local_cxy );
  #endif

  …
  }

- ////////////////////////////////////////
- bool_t cluster_is_undefined( cxy_t cxy )
- {
-     uint32_t x_size = LOCAL_CLUSTER->x_size;
-     uint32_t y_size = LOCAL_CLUSTER->y_size;
-
-     uint32_t x = HAL_X_FROM_CXY( cxy );
-     uint32_t y = HAL_Y_FROM_CXY( cxy );
-
-     if( x >= x_size ) return true;
-     if( y >= y_size ) return true;
-
-     return false;
- }
-
- //////////////////////////////////////
- bool_t cluster_is_active ( cxy_t cxy )
+ /////////////////////////////////////////////
+ inline bool_t cluster_is_active ( cxy_t cxy )
  {
      uint32_t x = HAL_X_FROM_CXY( cxy );
  …
  ////////////////////////////////////////////////////////////////////////////////////

- ///////////////////////////////////////
- lid_t cluster_select_local_core( void )
- {
-     uint32_t      min = 1000;
+ /////////////////////////////////////////////
+ lid_t cluster_select_local_core( cxy_t cxy )
+ {
+     uint32_t      min = 1000000;
      lid_t         sel = 0;
      uint32_t      nthreads;
      lid_t         lid;
      scheduler_t * sched;
-     cluster_t   * cluster = LOCAL_CLUSTER;
-
-     for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
-     {
-         sched    = &cluster->core_tbl[lid].scheduler;
-         nthreads = sched->u_threads_nr + sched->k_threads_nr;
+     cluster_t   * cluster = LOCAL_CLUSTER;
+     uint32_t      ncores  = hal_remote_l32( XPTR( cxy , &cluster->cores_nr ) );
+
+     for( lid = 0 ; lid < ncores ; lid++ )
+     {
+         sched = &cluster->core_tbl[lid].scheduler;
+
+         nthreads = hal_remote_l32( XPTR( cxy , &sched->u_threads_nr ) ) +
+                    hal_remote_l32( XPTR( cxy , &sched->k_threads_nr ) );

          if( nthreads < min )
  …
      uint32_t pref_nr;       // number of owned processes in cluster cxy

-     assert( (cluster_is_undefined( cxy ) == false), "illegal cluster index" );
+     assert( (cluster_is_active( cxy ) ), "illegal cluster index" );

      // get extended pointer on root and lock for local process list in cluster
trunk/kernel/kern/cluster.h
r635 → r637

   * authors  Ghassan Almaless (2008,2009,2010,2011,2012)
   *          Mohamed Lamine Karaoui (2015)
-  *          Alain Greiner (2016,2017,2018)
+  *          Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …
      uint32_t     nb_fbf_channels;   /*! number of FBF channels                */

-     char         cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];
+     // number of cores for each cluster in the mesh
+     uint8_t      cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y];

      // local parameters
  …
   * in the local boot-info structure <info> build by the boot-loader.
   * 1) the cluster_info_init() function is called first, to initialize the structural
-  *    constants, and cannot use the TXT0 kernel terminal.
-  * 2) the cluster_manager_init() function initialize various complex structures:
+  *    constants, including the cluster_info[x][y] array.
+  *    It cannot use the TXT0 kernel terminal.
+  * 2) the cluster_manager_init() function initializes various complex structures:
   *    - the local DQDT nodes,
   *    - the PPM, KHM, and KCM allocators,
  …
   *    - the local RPC FIFO,
   *    - the process manager.
-  *    It does NOT initialise the local device descriptors.
   *    It can use the TXT0 kernel terminal.
   ******************************************************************************************
  …

  /******************************************************************************************
-  * This function checks the validity of a cluster identifier.
-  ******************************************************************************************
-  * @ cxy : cluster identifier to be checked.
-  * @ returns true if the identified cluster does not exist.
-  *****************************************************************************************/
- bool_t cluster_is_undefined( cxy_t cxy );
-
- /******************************************************************************************
-  * This function uses the local cluster_info[][] array in cluster descriptor,
-  * and returns true when the cluster identified by the <cxy> argument is active.
-  ******************************************************************************************
-  * @ cxy : cluster identifier.
+  * This debug function displays the current values stored in the cluster_info[][] array
+  * of a remote cluster identified by the <cxy> argument.
+  * It can be called by a thread running in any cluster.
+  ******************************************************************************************
+  * @ cxy : remote cluster identifier.
+  *****************************************************************************************/
+ void cluster_info_display( cxy_t cxy );
+
+ /******************************************************************************************
+  * This function access the local cluster_info[][] array and returns true when the
+  * cluster identified by the <cxy> argument is active (contains a kernel instance).
+  ******************************************************************************************
+  * @ cxy : checked cluster identifier.
   * @ return true if cluster contains a kernel instance.
   *****************************************************************************************/
  …
   * This function displays on the kernel terminal TXT0 all user processes registered
   * in the cluster defined by the <cxy> argument.
-  * It can be called by a thread running in any cluster, because is use remote accesses
-  * to scan the xlist of registered processes.
+  * It can be called by a thread running in any cluster.
   ******************************************************************************************
   * @ cxy : cluster identifier.
  …

  /******************************************************************************************
-  * This function uses the local boot_info and returns the core local index that has
-  * the lowest usage in local cluster.
-  *****************************************************************************************/
- lid_t cluster_select_local_core( void );
+  * This function selects the core that has the lowest usage in a - possibly remote -
+  * cluster identified by the <cxy> argument.
+  * It can be called by a thread running in any cluster.
+  ******************************************************************************************
+  * @ cxy : target cluster identifier.
+  * @ return the selected core local index.
+  *****************************************************************************************/
+ lid_t cluster_select_local_core( cxy_t cxy );
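Note: taken together, cluster_is_active() and the new cluster_select_local_core( cxy ) prototype change how placement code is written in this changeset: callers first check that a candidate cluster actually contains a kernel instance, then ask that (possibly remote) cluster for its least loaded core. A minimal sketch of the calling convention, not code from this changeset (the helper name select_core_in and the candidate_cxy argument are hypothetical):

    // sketch only : select a core in a candidate cluster, falling back to the
    // local cluster when the candidate contains no kernel instance
    static inline cxy_t select_core_in( cxy_t candidate_cxy , lid_t * lid )
    {
        cxy_t cxy = cluster_is_active( candidate_cxy ) ? candidate_cxy : local_cxy;

        *lid = cluster_select_local_core( cxy );   // least loaded core in <cxy>

        return cxy;
    }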
trunk/kernel/kern/do_syscall.c
r626 → r637

      sys_get_config,          // 40
-     sys_get_core,            // 41
+     sys_get_core_id,         // 41
      sys_get_cycle,           // 42
      sys_display,             // 43
  …
      sys_sync,                // 51
      sys_fsync,               // 52
+     sys_get_best_core,       // 53
+     sys_get_nb_cores,        // 54
  };

  …
      case SYS_GET_CONFIG:        return "GET_CONFIG";       // 40
-     case SYS_GET_CORE:          return "GET_CORE";         // 41
+     case SYS_GET_CORE_ID:       return "GET_CORE_ID";      // 41
      case SYS_GET_CYCLE:         return "GET_CYCLE";        // 42
      case SYS_DISPLAY:           return "DISPLAY";          // 43
  …
      case SYS_EXIT:              return "EXIT";             // 50
      case SYS_SYNC:              return "SYNC";             // 51
-     case SYS_FSYNC:             return "FSYNc";            // 52
+     case SYS_FSYNC:             return "FSYNC";            // 52
+     case SYS_GET_BEST_CORE:     return "GET_BEST_CORE";    // 53
+     case SYS_GET_NB_CORES:      return "GET_NB_CORES";     // 54

      default: return "undefined";
trunk/kernel/kern/dqdt.c
r632 → r637

   * dqdt.c - Distributed Quaternary Decision Tree implementation.
   *
-  * Author : Alain Greiner (2016,2017,2018)
+  * Author : Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …

      // display node content
-     nolock_printk("- level %d / cluster %x : threads = %x / pages = %x / clusters %d / cores %d\n",
-     node.level, GET_CXY( node_xp ), node.threads, node.pages, node.clusters, node.cores );
+     nolock_printk("- [%d,%x] : threads %x / pages %x / clusters %d / cores %d / parent_cxy %x\n",
+     node.level, GET_CXY( node_xp ),
+     node.threads, node.pages,
+     node.clusters, node.cores,
+     GET_CXY( node.parent ) );

      // recursive call on children if node is not terminal
  …
                              xptr_t parent_xp )
  {
-     assert( (level < 5) , __FUNCTION__, "illegal DQDT level %d\n", level );
+     assert( (level <= 5) , __FUNCTION__, "illegal DQDT level %d\n", level );

      uint32_t node_x;      // node X coordinate
  …

  #if DEBUG_DQDT_INIT
- printk("\n[ DBG] %s : cxy(%d,%d) / level %d / mask %x / half %d / ptr %x\n",
+ printk("\n[%s] thread[%x,%x] : cxy(%d,%d) / level %d / mask %x / half %d / ptr %x\n",
  __FUNCTION__, node_x, node_y, level, mask, half, node_ptr );
  #endif
  …
  void dqdt_init( void )
  {
-     // get x_size & y_size from cluster manager
-     cluster_t * cluster = &cluster_manager;
+     // get x_size & y_size
+     cluster_t * cluster = LOCAL_CLUSTER;
      uint32_t    x_size  = cluster->x_size;
      uint32_t    y_size  = cluster->y_size;
  …
      uint32_t level_max = bits_log2( size_ext );

-     // each CP0 register the DQDT root in local cluster manager
+     // all CP0s register the DQDT root in local cluster manager
      cluster->dqdt_root_xp = XPTR( 0 , &cluster->dqdt_tbl[level_max] );

+     // only CP0 in cluster 0 build the DQDT
+     if( local_cxy == 0 )
+     {
+
  #if DEBUG_DQDT_INIT
- if( local_cxy == 0 )
- printk("\n[ DBG] %s : x_size = %d / y_size = %d / level_max = %d\n",
- __FUNCTION__, x_size, y_size, level_max );
+ thread_t * this = CURRENT_THREAD;
+ printk("\n[%s] thread[%x,%x] enters : x_size = %d / y_size = %d / level_max = %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, x_size, y_size, level_max );
  #endif

  …

  #if DEBUG_DQDT_INIT
- if( local_cxy == 0 ) dqdt_display();
- #endif
-
+ dqdt_display();
+ #endif
+
+     }
  }  // end dqdt_init()

  …
      }

+ ///////////////////////////////////
+ xptr_t dqdt_get_root( cxy_t    cxy,
+                       uint32_t level )
+ {
+     xptr_t        node_xp;
+     cxy_t         node_cxy;
+     dqdt_node_t * node_ptr;
+     uint32_t      current_level;
+
+     assert( (level <= 5) , __FUNCTION__, "illegal DQDT level %d\n", level );
+
+ #if DEBUG_DQDT_GET_ROOT
+ thread_t * this = CURRENT_THREAD;
+ printk("\n[%s] thread[%x,%x] enters / cxy %x / level %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, cxy, level );
+ #endif
+
+     // check macro-cluster
+     if( cluster_is_active( cxy ) )
+     {
+         // initialise node_xp and current_level
+         node_xp       = XPTR( cxy , &LOCAL_CLUSTER->dqdt_tbl[0] );
+         current_level = 0;
+
+         // traverse the quad-tree from bottom to root
+         while( current_level < level )
+         {
+             node_cxy = GET_CXY( node_xp );
+             node_ptr = GET_PTR( node_xp );
+
+             node_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent ) );
+             current_level++;
+         }
+     }
+     else
+     {
+         node_xp = XPTR_NULL;
+     }
+
+ #if DEBUG_DQDT_GET_ROOT
+ printk("\n[%s] thread[%x,%x] exit / root_xp[%x,%x]\n",
+ __FUNCTION__, this->process->pid, this->trdid, GET_CXY( node_xp ), GET_PTR( node_xp ) );
+ #endif
+
+     return node_xp;
+
+ }

  /////////////////////////////////////////////////////////////////////////////////////
  …


- //////////////////////////////////////////
- cxy_t dqdt_get_cluster_for_process( void )
+ ///////////////////////////////////////////////////
+ cxy_t dqdt_get_cluster_for_thread( xptr_t root_xp )
  {
      // call recursive function
-     cxy_t cxy = dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , false );
-
- #if DEBUG_DQDT_SELECT_FOR_PROCESS
+     cxy_t cxy = dqdt_select_cluster( root_xp , false );
+
+ #if DEBUG_DQDT_SELECT_FOR_THREAD
  uint32_t cycle = hal_get_cycles();
  if( cycle > DEBUG_DQDT_SELECT_FOR_PROCESS )
  …
  }

- /////////////////////////////////////////
- cxy_t dqdt_get_cluster_for_memory( void )
+ ///////////////////////////////////////////////////
+ cxy_t dqdt_get_cluster_for_memory( xptr_t root_xp )
  {
      // call recursive function
-     cxy_t cxy = dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , true );
+     cxy_t cxy = dqdt_select_cluster( root_xp , true );

  #if DEBUG_DQDT_SELECT_FOR_MEMORY
trunk/kernel/kern/dqdt.h
r632 → r637

   * kern/dqdt.h - Distributed Quad Decision Tree
   *
-  * Author : Alain Greiner (2016,2017,2018)
+  * Author : Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …
  /****************************************************************************************
   * This DQDT infrastructure maintains a topological description of ressources usage
-  * in each cluster: number of threads, and number of physical pages allocated.
+  * in each cluster: number of threads per core, and number of physical pages allocated.
   *
-  * - If X_SIZE or Y_SIZE are equal to 1, it makes the assumption that the cluster
-  *   topology is a one dimensionnal vector, an build the smallest one-dimensionnal
-  *   quad-tree covering this one-dimensionnal vector. If the number of clusters
-  *   is not a power of 4, the tree is truncated as required.
-  *
-  *   TODO : the mapping for the one dimensionnal topology is not implemented yet [AG].
-  *
-  * - If both Y_SIZE and Y_SIZE are larger than 1, it makes the assumption that
-  *   the clusters topology is a 2D mesh. The [X,Y] coordinates of a cluster are
-  *   obtained from the CXY identifier using the Rrelevant macros.
-  *   X = CXY >> Y_WIDTH / Y = CXY & ((1<<Y_WIDTH)-1)
-  * - If the mesh X_SIZE and Y_SIZE dimensions are not equal, or are not power of 2,
-  *   or the mesh contains "holes" reported in the cluster_info[x][y] array,
-  *   we build the smallest two dimensionnal quad-tree covering all clusters,
-  *   and this tree is truncated as required.
-  * - The mesh size is supposed to contain at most 32 * 32 clusters.
-  *   Therefore, it can exist at most 6 DQDT nodes in a given cluster:
-  *   . Level 0 nodes exist on all clusters and have no children.
-  *   . Level 1 nodes exist when both X and Y coordinates are multiple of 2
-  *   . Level 2 nodes exist when both X and Y coordinates are multiple of 4
-  *   . Level 3 nodes exist when both X and Y coordinates are multiple of 8
-  *   . Level 4 nodes exist when both X and Y coordinates are multiple of 16
-  *   . Level 5 nodes exist when both X and Y coordinates are multiple of 32
-  * - For nodes other than level 0, the placement is defined as follow:
-  *   . The root node is placed in the cluster containing the core executing
-  *     the dqdt_init() function.
-  *   . An intermediate node (representing a given sub-tree) is placed in one
-  *     cluster covered by the subtree, pseudo-randomly selected.
+  * It is organized as a quad-tree, where the leaf cells are the clusters, organised
+  * as a 2D mesh. Each node in the quad-tree (including the root and the leaf cells,
+  * covers a "macro-cluster", that is a square array of clusters where the number
+  * in the macro-cluster is a power of 4, and the macro-cluster side is a power of two.
+  * Each node contains informations on ressources usage (physical memory and cores)
+  * in the covered macro-cluster.
+  * This quad-tree can be truncated, if the physical mesh X_SIZE and Y_SIZE dimensions
+  * are not equal, or are not power of 2, or if the physical mesh contains "holes".
+  * The mesh size is supposed to contain at most 32*32 clusters in this implementation.
+  * . Level 0 nodes exist in all clusters and have no children.
+  * . Level 1 nodes can be placed in any cluster of the covered 2*2 macro-cluster.
+  * . Level 2 nodes can be placed in any cluster of the covered 4*4 macro-cluster.
+  * . Level 3 nodes can be placed in any cluster of the covered 8*8 macro-cluster.
+  * . Level 4 nodes can be placed in any cluster of the covered 16*16 macro-cluster.
+  * . Level 5 nodes can be placed in any cluster of the covered 32*32 macro-cluster.
+  * The root node is placed in the cluster containing the core executing the dqdt_init()
+  * function. Other (non level 0) nodes are placed pseudo-randomly.
   ***************************************************************************************/

  …
   * This structure describes a node of the DQDT.
   * The max number of children is 4, but it can be smaller for some nodes.
-  * Level 0 nodes are the clusters, and have no children.
-  * The root node has no parent.
+  * Level 0 nodes have no children. The root node has no parent.
   ***************************************************************************************/

  typedef …
      uint32_t      level;           /*! node level                                  */
      uint32_t      arity;           /*! actual children number in this node         */
-     uint32_t      threads;         /*! current number of threads in macro-cluster  */
-     uint32_t      pages;           /*! current number of pages in macro-cluster    */
+     uint32_t      threads;         /*! number of threads in macro-cluster          */
+     uint32_t      pages;           /*! number of allocated pages in macro-cluster  */
      uint32_t      cores;           /*! number of active cores in macro cluster     */
-     uint32_t      clusters;        /*! number of active cluster in macro cluster   */
+     uint32_t      clusters;        /*! number of active clusters in macro cluster  */
      xptr_t        parent;          /*! extended pointer on parent node             */
      xptr_t        children[2][2];  /*! extended pointers on children nodes         */
  …
   * This function recursively initializes the DQDT structure from informations
   * stored in cluster manager (x_size, y_size and cluster_info[x][y].
-  * It is executed in all clusters by the local CP0, to compute level_max and register
+  * It is called in all clusters by the local CP0, to compute level_max and register
   * the DQDT root node in each cluster manager, but only CPO in cluster 0 build actually
   * the quad-tree covering all active clusters.
  …
   ***************************************************************************************/
  void dqdt_increment_threads( void );
+
  void dqdt_decrement_threads( void );

  …

  /****************************************************************************************
-  * This function can be called in any cluster. It traverses the DQDT tree
-  * from the root to the bottom, to analyse the computing load and select the cluster
-  * with the lowest number ot threads to place a new process.
+  * This function returns an extended pointer on the dqdt node that is the root of
+  * the sub-tree covering the macro-cluster defined by the <level> argument and
+  * containing the cluster defined by the <cxy> argument. It returns XPTR_NULL if
+  * this macro-cluster is undefined (when the cxy cluster contains no core).
   ****************************************************************************************
+  * @ cxy     : cluster identifier.
+  * @ level   : level of the sub-tree.
+  * @ returns root_xp if success / return XPTR_NULL if no active core in macro_cluster.
+  ***************************************************************************************/
+ xptr_t dqdt_get_root( cxy_t    cxy,
+                       uint32_t level );
+
+ /****************************************************************************************
+  * This function can be called in any cluster. It traverses the DQDT tree from the
+  * local root of a macro-cluster, defined by the <root_xp> argument, to the bottom.
+  * It analyses the computing load & select the cluster containing the lowest number
+  * ot threads.
+  ****************************************************************************************
+  * @ root_xp : extended pointer on DQDT node root.
   * @ returns the cluster identifier with the lowest computing load.
   ***************************************************************************************/
- cxy_t dqdt_get_cluster_for_process( void );
+ cxy_t dqdt_get_cluster_for_thread( xptr_t root_xp );

  /****************************************************************************************
-  * This function can be called in any cluster. It traverses the DQDT tree
-  * from the root to the bottom, to analyse the memory load and select the cluster
-  * with the lowest memory load for dynamic memory allocation with no locality constraint.
+  * This function can be called in any cluster. It traverses the DQDT tree from the
+  * local root of a macro-cluster, defined by the <root_xp> argument, to the bottom.
+  * It analyses the memory load & select the cluster with the lowest number of allocated
+  * physical pages.
   ****************************************************************************************
+  * @ root_xp : extended pointer on DQDT node root.
   * @ returns the cluster identifier with the lowest memory load.
   ***************************************************************************************/
- cxy_t dqdt_get_cluster_for_memory( void );
+ cxy_t dqdt_get_cluster_for_memory( xptr_t root_xp );

  /****************************************************************************************
   * This function displays on kernel TXT0 the DQDT state for all nodes in the quad-tree.
-  * It traverses the quadtree from root to bottom, and can be called by a thread
-  * running in any cluster
+  * It traverses the quadtree from the global root to bottom.
+  * It can be called by a thread running in any cluster
   ***************************************************************************************/
  void dqdt_display( void );
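Note: the new dqdt_get_root() entry point is what lets the two selection functions above work on a sub-tree instead of the whole machine. A minimal sketch of how the three functions can be combined, not code from this changeset (the helper name pick_cluster_near and its arguments are hypothetical); the fallback on LOCAL_CLUSTER->dqdt_root_xp mirrors what sys_fork.c and sys_thread_create.c do in this changeset:

    // sketch only : constrain DQDT placement to the macro-cluster of the given
    // level around cluster <cxy>, falling back to the global DQDT root when the
    // macro-cluster is undefined (cxy contains no active core)
    static cxy_t pick_cluster_near( cxy_t cxy , uint32_t level , bool_t for_memory )
    {
        xptr_t root_xp = dqdt_get_root( cxy , level );

        if( root_xp == XPTR_NULL ) root_xp = LOCAL_CLUSTER->dqdt_root_xp;

        return for_memory ? dqdt_get_cluster_for_memory( root_xp )
                          : dqdt_get_cluster_for_thread( root_xp );
    }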
trunk/kernel/kern/kernel_init.c
r635 → r637

      /////////////////////////////////////////////////////////////////////////////////
-     // STEP 2 : core[0] initializes the cluter manager,
-     //          including the physical memory allocator.
+     // STEP 2 : core[0] initializes the cluster manager,
+     //          including the physical memory allocators.
      /////////////////////////////////////////////////////////////////////////////////

  …
      ////////////////////////////////////////////////////////////////////////////////
-     // STEP 5 : core[0] initializes the distibuted LAPIC descriptor.
-     //          core[0] initializes the internal chdev descriptors
+     // STEP 5 : core[0] initialize the distibuted LAPIC descriptor.
+     //          core[0] initialize the internal chdev descriptors
      //          core[0] initialize the local external chdev descriptors
      ////////////////////////////////////////////////////////////////////////////////
trunk/kernel/kern/process.c
r635 → r637

      // select a core in local cluster to execute the main thread
-     lid = cluster_select_local_core( );
+     lid = cluster_select_local_core( local_cxy );

      // initialize pthread attributes for main thread
trunk/kernel/kern/rpc.c
r635 → r637

      // select one core
-     core_lid = cluster_select_local_core( );
+     core_lid = cluster_select_local_core( local_cxy );

      // call local kernel function
trunk/kernel/kern/scheduler.h
r564 → r637

  {
      busylock_t     lock;          /*! lock protecting scheduler state           */
-     uint16_t       u_threads_nr;  /*! total number of attached user threads     */
-     uint16_t       k_threads_nr;  /*! total number of attached kernel threads   */
+     uint32_t       u_threads_nr;  /*! total number of attached user threads     */
+     uint32_t       k_threads_nr;  /*! total number of attached kernel threads   */
      list_entry_t   u_root;        /*! root of list of user threads              */
      list_entry_t   k_root;        /*! root of list of kernel threads            */
trunk/kernel/kern/thread.c
r635 → r637

      else
      {
-         core_lid = cluster_select_local_core( );
+         core_lid = cluster_select_local_core( local_cxy );
      }

  …
  printk("\n[%s] CPU & FPU contexts created\n",
  __FUNCTION__, thread->trdid );
- hal_vmm_display( process , true );
+ hal_vmm_display( XPTR( local_cxy , process ) , true );

  …
      // select a target core in local cluster
-     core_lid = cluster_select_local_core( );
+     core_lid = cluster_select_local_core( local_cxy );

  #if (DEBUG_THREAD_USER_FORK & 1)
  …
  printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
  __FUNCTION__, process->pid, thread->trdid, cycle );
- hal_vmm_display( process , true );
+ hal_vmm_display( XPTR( local_cxy , process ) , true );

  …
      // check trdid argument
      if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) ||
-         cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
+         cluster_is_active( target_cxy ) == false )   return XPTR_NULL;

      // get root of list of process descriptors in target cluster
trunk/kernel/kernel_config.h
r635 → r637

  #define DEBUG_ELF_LOAD                    0

+ #define DEBUG_DQDT_GET_ROOT               0
  #define DEBUG_DQDT_INIT                   0
+ #define DEBUG_DQDT_SELECT_FOR_THREAD      0
+ #define DEBUG_DQDT_SELECT_FOR_MEMORY      0
+ #define DEBUG_DQDT_UPDATE_PAGES           0
  #define DEBUG_DQDT_UPDATE_THREADS         0
- #define DEBUG_DQDT_SELECT_FOR_PROCESS     0
- #define DEBUG_DQDT_UPDATE_PAGES           0
- #define DEBUG_DQDT_SELECT_FOR_MEMORY      0

  #define DEBUG_FATFS_ADD_DENTRY            0
  …
  #define DEBUG_RWLOCK_CXY                  0

- #define DEBUG_SCHED_HANDLE_SIGNALS        2
+ #define DEBUG_SCHED_HANDLE_SIGNALS        0
  #define DEBUG_SCHED_YIELD                 0
  #define DEBUG_SCHED_RPC_ACTIVATE          0
  …
  #define DEBUG_SEM                         0

- #define DEBUG_SYSCALLS_ERROR              2
+ #define DEBUG_SYSCALLS_ERROR              2

  #define DEBUG_SYS_BARRIER                 0
  …
  #define DEBUG_SYS_GETCWD                  0
  #define DEBUG_SYS_GETPID                  0
+ #define DEBUG_SYS_GET_BEST_CORE           0
+ #define DEBUG_SYS_GET_CORE_ID             0
+ #define DEBUG_SYS_GET_NB_CORES            0
  #define DEBUG_SYS_ISATTY                  0
  #define DEBUG_SYS_IS_FG                   0
  …

  #define CONFIG_INSTRUMENTATION_SYSCALLS   0
- #define CONFIG_INSTRUMENTATION_PGFAULTS   1
- #define CONFIG_INSTRUMENTATION_FOOTPRINT  1
+ #define CONFIG_INSTRUMENTATION_PGFAULTS   0
+ #define CONFIG_INSTRUMENTATION_FOOTPRINT  0

trunk/kernel/mm/mapper.c
r635 → r637

      if ( page_xp == XPTR_NULL ) return -1;

-     // compute cluster and pointers on page in mapper
-     xptr_t    map_xp  = ppm_page2base( page_xp );
-     uint8_t * map_ptr = GET_PTR( map_xp );
-     cxy_t     map_cxy = GET_CXY( map_xp );
+     // compute extended pointer in kernel mapper
+     xptr_t    map_xp  = ppm_page2base( page_xp ) + page_offset;

  #if (DEBUG_MAPPER_MOVE_USER & 1)
  …
      if( to_buffer )
      {
-         hal_copy_to_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes );
+         hal_copy_to_uspace( buf_ptr , map_xp , page_bytes );

  #if DEBUG_MAPPER_MOVE_USER & 1
  …
  printk("\n[%s] thread[%x,%x] moved %d bytes / mapper %s (%x,%x) -> user buffer(%x,%x)\n",
  __FUNCTION__, this->process->pid, this->trdid, page_bytes,
- name, map_cxy, map_ptr + page_offset, local_cxy, buf_ptr );
+ name, GET_CXY(map_xp), GET_PTR(map_xp), local_cxy, buf_ptr );

  …
      {
          ppm_page_do_dirty( page_xp );
-         hal_copy_from_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes );
+         hal_copy_from_uspace( map_xp , buf_ptr , page_bytes );

  #if DEBUG_MAPPER_MOVE_USER & 1
  …
  printk("\n[%s] thread[%x,%x] moved %d bytes / user buffer(%x,%x) -> mapper %s (%x,%x)\n",
  __FUNCTION__, this->process->pid, this->trdid, page_bytes,
- local_cxy, buf_ptr, name, map_cxy, map_ptr + page_offset );
+ local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
  mapper_display_page( mapper_xp , page_id, 128 );
  #endif
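Note: this hunk illustrates the calling convention used throughout the changeset for user/kernel transfers: the kernel-side argument of hal_copy_to_uspace(), hal_copy_from_uspace() and the hal_strcpy_*_uspace() variants is now an extended pointer, so the kernel buffer can live in any cluster. A minimal sketch, not code from this changeset (copy_counter_to_user and k_value are hypothetical names; u_ptr is assumed to be a user address already checked with vmm_get_vseg()):

    // sketch only : kernel -> user takes the user destination first and an XPTR
    // on the kernel source second ; user -> kernel is the mirror image
    static void copy_counter_to_user( uint32_t * u_ptr )
    {
        uint32_t k_value = 0;     // kernel-side buffer, here in the local cluster

        hal_copy_from_uspace( XPTR( local_cxy , &k_value ) , u_ptr , sizeof(uint32_t) );

        k_value++;

        hal_copy_to_uspace( u_ptr , XPTR( local_cxy , &k_value ) , sizeof(uint32_t) );
    }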
trunk/kernel/mm/ppm.c
r636 → r637

      page_xp = XPTR( page_cxy , page_ptr );

-
      // get local pointer on PPM (same in all clusters)
      ppm_t * ppm = &LOCAL_CLUSTER->ppm;
  …
          buddy_index = current_index ^ (1 << current_order);
          buddy_ptr   = pages_tbl + buddy_index;
+
+         // get buddy order
+         buddy_order = hal_remote_l32( XPTR( page_cxy , &buddy_ptr->order ) );

          // exit loop if buddy not found
trunk/kernel/syscalls/shared_include/shared_mman.h
r623 → r637

  typedef struct mmap_attr_s
  {
-     void         * addr;      /*! requested virtual address (unused : should be NULL)    */
+     void         * addr;      /*! buffer for allocated vseg base address (return value)  */
      unsigned int   length;    /*! requested vseg size (bytes)                            */
      unsigned int   prot;      /*! access modes                                           */
trunk/kernel/syscalls/shared_include/syscalls_numbers.h
r626 → r637

   * It must be kept consistent with the array defined in do_syscalls.c
   *****************************************************************************************/
- typedef enum {
+ typedef enum
+ {
      SYS_THREAD_EXIT     = 0,
      SYS_THREAD_YIELD    = 1,
  …

      SYS_GET_CONFIG      = 40,
-     SYS_GET_CORE        = 41,
+     SYS_GET_CORE_ID     = 41,
      SYS_GET_CYCLE       = 42,
      SYS_DISPLAY         = 43,
  …
      SYS_SYNC            = 51,
      SYS_FSYNC           = 52,
+     SYS_GET_BEST_CORE   = 53,
+     SYS_GET_NB_CORES    = 54,

-     SYSCALLS_NR         = 53,
+     SYSCALLS_NR         = 55,

  } syscalls_t;
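Note: the comment at the top of this enum requires that it stays consistent with the dispatch array in do_syscall.c, and this changeset updates both sides (see the do_syscall.c hunk above). A compile-time guard could make that invariant explicit; the sketch below is not part of this changeset and assumes both that C11 _Static_assert is acceptable in this build environment and that the dispatch array is named syscall_tbl (hypothetical name):

    // sketch only : placed next to the dispatch array in do_syscall.c, this fails
    // the build when the enum and the array drift apart
    _Static_assert( (sizeof(syscall_tbl) / sizeof(syscall_tbl[0])) == SYSCALLS_NR,
                    "syscall dispatch table out of sync with syscalls_t" );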
trunk/kernel/syscalls/sys_barrier.c
r635 → r637

  #include <remote_barrier.h>

- #if DEBUG_SYS_BARRIER
  //////////////////////////////////////////////////////
  static char * sys_barrier_op_str( uint32_t operation )
  …
      else return "undefined";
  }
- #endif

  //////////////////////////////////
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : unmapped barrier %x / thread %x / process %x\n",
- __FUNCTION__ , vaddr , this->trdid , process->pid );
+ printk("\n[ERROR] in %s for %s : unmapped barrier %x / thread[%x,%x]\n",
+ __FUNCTION__, sys_barrier_op_str(operation), vaddr, process->pid, this->trdid );
  #endif
      this->errno = error;
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : unmapped barrier attributes %x / thread %x / process %x\n",
- __FUNCTION__ , attr , this->trdid , process->pid );
+ printk("\n[ERROR] in %s for INIT : unmapped barrier attributes %x / thread[%x,%x]\n",
+ __FUNCTION__ , attr , process->pid , this->trdid );
  #endif
      this->errno = EINVAL;
  …

      // copy barrier attributes into kernel space
-     hal_copy_from_uspace( local_cxy,
-                           &k_attr,
-                           (void*)attr,
+     hal_copy_from_uspace( XPTR( local_cxy , &k_attr ),
+                           (void *)attr,
                            sizeof(pthread_barrierattr_t) );

  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : wrong arguments / count %d / x_size %d / y_size %d / nthreads %x\n",
+ printk("\n[ERROR] in %s for INIT : count (%d) != x_size (%d) * y_size (%d) * nthreads (%x)\n",
  __FUNCTION__, count, k_attr.x_size, k_attr.y_size, k_attr.nthreads );
  #endif
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : cannot create barrier %x / thread %x / process %x\n",
- __FUNCTION__ , vaddr , this->trdid , process->pid );
+ printk("\n[ERROR] in %s for INIT : cannot create barrier %x / thread[%x,%x]\n",
+ __FUNCTION__ , vaddr , process->pid , this->trdid );
  #endif
      this->errno = ENOMEM;
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",
- __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
+ printk("\n[ERROR] in %s for WAIT : barrier %x not registered / thread[%x,%x]\n",
+ __FUNCTION__ , (intptr_t)vaddr , process->pid, this->trdid );
  #endif
      this->errno = EINVAL;
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",
- __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
+ printk("\n[ERROR] in %s for DESTROY : barrier %x not registered / thread[%x,%x]\n",
+ __FUNCTION__ , (intptr_t)vaddr , process->pid, this->trdid );
  #endif
      this->errno = EINVAL;
trunk/kernel/syscalls/sys_chdir.c
r610 → r637

   * sys_chdir.c - kernel function implementing the "chdir" syscall.
   *
-  * Author    Alain Greiner (2016,2017,2018)
+  * Author    Alain Greiner (2016,2017,2018, 2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_CHDIR
trunk/kernel/syscalls/sys_chmod.c
r566 → r637

   * sys_chmod.c - Change file access rights.
   *
-  * Author    Alain Greiner (2016,2017)
+  * Author    Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) 2015 UPMC Sorbonne Universites
  …

  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : pathname too long / thread %x in process %x\n",
- __FUNCTION__, this->trdid, process->pid );
+ printk("\n[ERROR] in %s : pathname too long / thread %x in process %x\n",
+ __FUNCTION__, this->trdid, process->pid );
  #endif
      this->errno = ENFILE;
  …

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

      printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__ );
trunk/kernel/syscalls/sys_display.c
r635 → r637

      // copy string to kernel space
-     hal_strcpy_from_uspace( kbuf , string , 512 );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             string,
+                             512 );

      // print message on TXT0 kernel terminal
  …

      // check cxy argument
-     if( cluster_is_undefined( cxy ) )
+     if( cluster_is_active( cxy ) == false )
      {

  …

      // check cxy argument
-     if( cluster_is_undefined( cxy ) )
+     if( cluster_is_active( cxy ) == false )
      {

  …

      // check cxy argument
-     if( cluster_is_undefined( cxy ) )
+     if( cluster_is_active( cxy ) == false )
      {

  …

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             path,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

      // compute root inode for pathname
  …
      uint32_t cxy = (uint32_t)arg0;

-     if( cluster_is_undefined( cxy ) )
+     if( cluster_is_active( cxy ) == false )
      {
trunk/kernel/syscalls/sys_exec.c
r635 → r637

      // copy the array of pointers to kernel buffer
-     hal_copy_from_uspace( local_cxy,
-                           k_pointers,
+     hal_copy_from_uspace( XPTR( local_cxy , k_pointers ),
                            u_pointers,
                            CONFIG_PPM_PAGE_SIZE );
  …

          // copy the user string to kernel buffer
-         hal_copy_from_uspace( local_cxy,
-                               k_buf_ptr,
+         hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ),
                                k_pointers[index],
                                length );
  …

      // copy pathname in exec_info structure (kernel space)
-     hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , exec_info.path ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_EXEC
trunk/kernel/syscalls/sys_fork.c
r635 → r637

      else                     // DQDT placement
      {
-         child_cxy = dqdt_get_cluster_for_process();
+         child_cxy = dqdt_get_cluster_for_thread( LOCAL_CLUSTER->dqdt_root_xp );
      }

trunk/kernel/syscalls/sys_get_config.c
r635 → r637

      // copy to user space
-     hal_copy_to_uspace( local_cxy, &k_x_size, x_size, sizeof(uint32_t) );
-     hal_copy_to_uspace( local_cxy, &k_y_size, y_size, sizeof(uint32_t) );
-     hal_copy_to_uspace( local_cxy, &k_ncores, ncores, sizeof(uint32_t) );
+     hal_copy_to_uspace( x_size, XPTR( local_cxy , &k_x_size ), sizeof(uint32_t) );
+     hal_copy_to_uspace( y_size, XPTR( local_cxy , &k_y_size ), sizeof(uint32_t) );
+     hal_copy_to_uspace( ncores, XPTR( local_cxy , &k_ncores ), sizeof(uint32_t) );

      hal_fence();
trunk/kernel/syscalls/sys_get_cycle.c
r635 → r637

      process_t * process = this->process;

+ #if (DEBUG_SYS_GET_CYCLE || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_start = hal_get_cycles();
+ #endif
+
      // check buffer in user space
      error = vmm_get_vseg( process , (intptr_t)cycle , &vseg );
  …

      // copy to user space
-     hal_copy_to_uspace( local_cxy, &k_cycle, cycle, sizeof(uint64_t) );
+     hal_copy_to_uspace( cycle,
+                         XPTR( local_cxy , &k_cycle ),
+                         sizeof(uint64_t) );
+
+ #if (DEBUG_SYS_GET_CYCLE || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_end = hal_get_cycles();
+ #endif
+
+ #if DEBUG_SYS_GET_CYCLE
+ if( DEBUG_SYS_GET_CYCLE < tm_end )
+ printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+ __FUNCTION__ , process->pid, this->trdid, (uint32_t)tm_end );
+ #endif
+
+ #if CONFIG_INSTRUMENTATION_SYSCALLS
+ hal_atomic_add( &syscalls_cumul_cost[SYS_GET_CYCLE] , tm_end - tm_start );
+ hal_atomic_add( &syscalls_occurences[SYS_GET_CYCLE] , 1 );
+ #endif

      return 0;
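Note: this hunk shows the per-syscall instrumentation pattern that r637 applies to several syscalls (sys_mkfifo.c, sys_place_fork.c and sys_sem.c below follow the same shape): sample the cycle counter at entry and exit under a combined guard, then accumulate per-syscall cost and occurrence counters. A condensed sketch of that skeleton, not a new API (sys_example, SYS_EXAMPLE, DEBUG_SYS_EXAMPLE and do_work() are hypothetical placeholders):

    int sys_example( void )
    {
    #if (DEBUG_SYS_EXAMPLE || CONFIG_INSTRUMENTATION_SYSCALLS)
        uint64_t tm_start = hal_get_cycles();
    #endif

        int error = do_work();                       // actual syscall body

    #if (DEBUG_SYS_EXAMPLE || CONFIG_INSTRUMENTATION_SYSCALLS)
        uint64_t tm_end = hal_get_cycles();
    #endif

    #if CONFIG_INSTRUMENTATION_SYSCALLS
        // cost and occurrence counters are indexed by syscall number
        hal_atomic_add( &syscalls_cumul_cost[SYS_EXAMPLE] , tm_end - tm_start );
        hal_atomic_add( &syscalls_occurences[SYS_EXAMPLE] , 1 );
    #endif

        return error;
    }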
trunk/kernel/syscalls/sys_getcwd.c
r610 → r637

   * sys_getcwd.c - kernel function implementing the "getcwd" syscall.
   *
-  * Author    Alain Greiner (2016,2017,2018)
+  * Author    Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …

      // copy kernel buffer to user space
-     hal_strcpy_to_uspace( buffer , first , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_to_uspace( buffer,
+                           XPTR( local_cxy , first ),
+                           CONFIG_VFS_MAX_PATH_LENGTH );

      hal_fence();
trunk/kernel/syscalls/sys_is_fg.c
r635 → r637

      // copy to user space
-     hal_copy_to_uspace( local_cxy, &is_txt_owner, is_fg, sizeof(uint32_t) );
+     hal_copy_to_uspace( is_fg,
+                         XPTR( local_cxy , &is_txt_owner ),
+                         sizeof(uint32_t) );

      hal_fence();
trunk/kernel/syscalls/sys_mkdir.c
r610 → r637

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_MKDIR
trunk/kernel/syscalls/sys_mkfifo.c
r566 → r637

   * sys_mkfifo.c - creates a named FIFO file.
   *
-  * Author    Alain Greiner (2016,2017)
+  * Author    Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …
                  uint32_t mode __attribute__((unused)) )
  {
-     error_t error;
      char    kbuf[CONFIG_VFS_MAX_PATH_LENGTH];

      process_t * process = this->process;

+ #if (DEBUG_SYS_MKFIFO || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_start = hal_get_cycles();
+ #endif
+
+ #if DEBUG_SYS_MKFIFO
+ if( DEBUG_SYS_MKFIFO < tm_end )
+ printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, pathname, (uint32_t)tm_end );
+ #endif
+
      // check fd_array not full
      if( process_fd_array_full() )
      {
-         printk("\n[ERROR] in %s : file descriptor array full for process %x\n",
-         __FUNCTION__ , process->pid );
+
+ #if DEBUG_SYSCALLS_ERROR
+ printk("\n[ERROR] in %s : file descriptor array full for process %x\n",
+ __FUNCTION__ , process->pid );
+ #endif
          this->errno = ENFILE;
          return -1;
  …
      if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH )
      {
-         printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ );
+
+ #if DEBUG_SYSCALLS_ERROR
+ printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ );
+ #endif
          this->errno = ENFILE;
          return -1;
  …

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

      printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__ );
      return -1;

-     if( error )
-     {
-         printk("\n[ERROR] in %s : cannot create named FIFO %s\n",
-         __FUNCTION__ , kbuf );
-         this->errno = error;
-         return -1;
-     }
+ #if (DEBUG_SYS_MKFIFO || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_end = hal_get_cycles();
+ #endif

-     return 0;
+ #if DEBUG_SYS_MKFIFO
+ if( DEBUG_SYS_MKFIFO < tm_end )
+ printk("\n[%s] thread[%x,%x] exit for <%s> / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, pathname, (uint32_t)tm_end );
+ #endif
+
+ #if CONFIG_INSTRUMENTATION_SYSCALLS
+ hal_atomic_add( &syscalls_cumul_cost[SYS_MKFIFO] , tm_end - tm_start );
+ hal_atomic_add( &syscalls_occurences[SYS_MKFIFO] , 1 );
+ #endif

  }  // end sys_mkfifo()
trunk/kernel/syscalls/sys_mmap.c
r635 → r637

  {
      vseg_t      * vseg;
-     cxy_t         vseg_cxy;
-     vseg_type_t   vseg_type;
+     cxy_t         vseg_cxy;     // target cluster for the vseg
+     vseg_type_t   vseg_type;    // vseg type
      mmap_attr_t   k_attr;       // attributes copy in kernel space
      xptr_t        mapper_xp;
-     error_t       error;
      reg_t         save_sr;      // required to enable IRQs

  …

      // check user buffer (containing attributes) is mapped
-     error = vmm_get_vseg( process , (intptr_t)attr , &vseg );
-
-     if( error )
+     if( vmm_get_vseg( process , (intptr_t)attr , &vseg ) )
      {

  …

      // copy attributes from user space to kernel space
-     hal_copy_from_uspace( local_cxy,
-                           &k_attr,
+     hal_copy_from_uspace( XPTR( local_cxy , &k_attr ),
                            attr,
                            sizeof(mmap_attr_t) );
  …

      // test mmap type : can be FILE / ANON / REMOTE
+     // to define vseg_type & vseg_cxy

      /////////////////////////////////////////////////////////// MAP_FILE
  …
  #if (DEBUG_SYS_MMAP & 1)
  if ( DEBUG_SYS_MMAP < tm_start )
- printk("\n[%s] thread[%x,%x] map file : fdid %d / offset %d / %d bytes\n",
+ printk("\n[%s] thread[%x,%x] type file : fdid %d / offset %x / %x bytes\n",
  __FUNCTION__, process->pid, this->trdid, fdid, offset, length );
  #endif

-         // FIXME: handle concurent delete of file by another thread closing it
+         // FIXME: handle concurent delete of file by another thread

          if( fdid >= CONFIG_PROCESS_FILE_MAX_NR )
  …
  #if (DEBUG_SYS_MMAP & 1)
  if ( DEBUG_SYS_MMAP < tm_start )
- printk("\n[%s] thread[%x,%x] map anon / %d bytes / cluster %x\n",
+ printk("\n[%s] thread[%x,%x] type anon / %x bytes / cluster %x\n",
  __FUNCTION__, process->pid, this->trdid, length, vseg_cxy );
  #endif
  …
  #if (DEBUG_SYS_MMAP & 1)
  if ( DEBUG_SYS_MMAP < tm_start )
- printk("\n[%s] thread[%x,%x] map remote / %d bytes / cluster %x\n",
+ printk("\n[%s] thread[%x,%x] type remote / %x bytes / target cluster %x\n",
  __FUNCTION__, process->pid, this->trdid, length, vseg_cxy );
  #endif

-         if( cluster_is_undefined( vseg_cxy ) )
+         if( cluster_is_active( vseg_cxy ) == false )
          {

  …
      process_t * ref_ptr = GET_PTR( ref_xp );

-     // create the vseg in reference cluster
+     // register vseg in reference VSL
      if( local_cxy == ref_cxy )
      {
  …
      }

-     // copy vseg base address to user space
-     hal_copy_to_uspace( local_cxy,
-                         &vseg->min,
-                         &attr->addr,
+     // copy vseg base address to user space mmap_attr_t
+     hal_copy_to_uspace( &attr->addr,
+                         XPTR( ref_cxy , &vseg->min ),
                          sizeof(intptr_t) );
      hal_fence();
  …
  #if DEBUG_SYS_MMAP
  if ( DEBUG_SYS_MMAP < tm_end )
- printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / base %x / size %d / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / base %x / size %x / cycle %d\n",
  __FUNCTION__, process->pid, this->trdid,
  vseg_type_str(vseg->type), vseg->cxy, vseg->min, length, (uint32_t)tm_end );
trunk/kernel/syscalls/sys_open.c
r625 → r637

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ) , pathname , CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_OPEN
trunk/kernel/syscalls/sys_opendir.c
r635 → r637

      // copy pathname in kernel space
-     hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
+                             pathname,
+                             CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_OPENDIR
  …

      // set ident value in user buffer
-     hal_copy_to_uspace( local_cxy,
-                         &ident,
-                         dirp,
+     hal_copy_to_uspace( dirp,
+                         XPTR( local_cxy , &ident ),
                          sizeof(intptr_t) );

trunk/kernel/syscalls/sys_place_fork.c
r623 → r637

      process_t * process = this->process;

+ #if (DEBUG_SYS_PLACE_FORK || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_start = hal_get_cycles();
+ #endif
+
+ #if DEBUG_SYS_PLACE_FORK
+ if( DEBUG_SYS_PLACE_FORK < tm_start )
+ printk("\n[%s] thread[%x,%x] enter / cxy %x / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, cxy, (uint32_t)tm_start );
+ #endif
+
      // check cxy argument
-     if( cluster_is_undefined( cxy ) )
+     if( cluster_is_active( cxy ) == false )
      {

  …
      this->fork_cxy = cxy;

+ #if (DEBUG_SYS_PLACE_FORK || CONFIG_INSTRUMENTATION_SYSCALLS)
+     uint64_t tm_end = hal_get_cycles();
+ #endif
+
+ #if DEBUG_SYS_PLACE_FORK
+ if( DEBUG_SYS_PLACE_FORK < tm_end )
+ printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+ __FUNCTION__ , process->pid, this->trdid, (uint32_t)tm_end );
+ #endif
+
+ #if CONFIG_INSTRUMENTATION_SYSCALLS
+ hal_atomic_add( &syscalls_cumul_cost[SYS_PLACE_FORK] , tm_end - tm_start );
+ hal_atomic_add( &syscalls_occurences[SYS_PLACE_FORK] , 1 );
+ #endif
+
      return 0;
trunk/kernel/syscalls/sys_readdir.c
r635 → r637

      // copy dirent pointer to user buffer
-     hal_copy_to_uspace( local_cxy,
-                         &direntp,
-                         buffer,
+     hal_copy_to_uspace( buffer,
+                         XPTR( local_cxy , &direntp ),
                          sizeof(void *) );

trunk/kernel/syscalls/sys_rename.c
r613 → r637

   * sys_rename.c - Rename a file or a directory.
   *
-  * Author    Alain Greiner (2016,2017,2018)
+  * Author    Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
  …

      // copy old name an new name in kernel space
-     hal_strcpy_from_uspace( k_old , old , CONFIG_VFS_MAX_PATH_LENGTH );
-     hal_strcpy_from_uspace( k_new , new , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , k_old ) , old , CONFIG_VFS_MAX_PATH_LENGTH );
+     hal_strcpy_from_uspace( XPTR( local_cxy , k_new ) , new , CONFIG_VFS_MAX_PATH_LENGTH );

  #if DEBUG_SYS_RENAME
trunk/kernel/syscalls/sys_rmdir.c
r604 r637 2 2 * sys_rmdir.c - Remove a directory from file system. 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) 2015 UPMC Sorbonne Universites … … 42 42 process_t * process = this->process; 43 43 44 #if (DEBUG_SYS_RMDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 45 uint64_t tm_start = hal_get_cycles(); 46 #endif 47 44 48 // check pathname length 45 49 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) … … 54 58 55 59 // copy pathname in kernel space 56 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 60 hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ), 61 pathname, 62 CONFIG_VFS_MAX_PATH_LENGTH ); 57 63 58 64 // get cluster and local pointer on reference process -
trunk/kernel/syscalls/sys_sem.c
r635 r637 58 58 process_t * process = this->process; 59 59 60 #if (DEBUG_SYS_SEM || CONFIG_INSTRUMENTATION_SYSCALLS) 61 uint64_t tm_start = hal_get_cycles(); 62 #endif 63 60 64 #if DEBUG_SYS_SEM 61 uint64_t tm_start;62 uint64_t tm_end;63 tm_start = hal_get_cycles();64 65 if( DEBUG_SYS_SEM < tm_start ) 65 66 printk("\n[DBG] %s : thread %x in process %x enter for %s / cycle %d\n", … … 137 138 138 139 // return value to user 139 hal_copy_to_uspace( local_cxy, 140 ¤t, 141 current_value, 140 hal_copy_to_uspace( current_value, 141 XPTR( local_cxy , ¤t ), 142 142 sizeof(uint32_t) ); 143 143 } … … 224 224 hal_fence(); 225 225 226 #if (DEBUG_SYS_SEM || CONFIG_INSTRUMENTATION_SYSCALLS) 227 uint64_t tm_end = hal_get_cycles(); 228 #endif 229 226 230 #if DEBUG_SYS_SEM 227 tm_end = hal_get_cycles();228 231 if( DEBUG_SYS_SEM < tm_end ) 229 232 printk("\n[DBG] %s : thread %x in process %x exit for %s / cost = %d / cycle %d\n", … … 232 235 #endif 233 236 237 #if CONFIG_INSTRUMENTATION_SYSCALLS 238 hal_atomic_add( &syscalls_cumul_cost[SYS_SEM] , tm_end - tm_start ); 239 hal_atomic_add( &syscalls_occurences[SYS_SEM] , 1 ); 240 #endif 241 234 242 return 0; 235 243 -
trunk/kernel/syscalls/sys_stat.c
r635 r637 80 80 81 81 // copy pathname in kernel space 82 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 82 hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ), 83 pathname, 84 CONFIG_VFS_MAX_PATH_LENGTH ); 83 85 84 86 #if DEBUG_SYS_STAT … … 121 123 122 124 // copy k_stat to u_stat 123 hal_copy_to_uspace( local_cxy, 124 &k_stat, 125 u_stat, 125 hal_copy_to_uspace( u_stat, 126 XPTR( local_cxy , &k_stat ), 126 127 sizeof(struct stat) ); 127 128 -
trunk/kernel/syscalls/sys_thread_create.c
r635 r637 66 66 67 67 #if DEBUG_SYS_THREAD_CREATE 68 tm_start = hal_get_cycles();69 68 if( DEBUG_SYS_THREAD_CREATE < tm_start ) 70 69 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", … … 73 72 74 73 // check trdid buffer in user space 75 error = vmm_get_vseg( process , (intptr_t)trdid_ptr , &vseg ); 76 77 if ( error ) 74 if( vmm_get_vseg( process , (intptr_t)trdid_ptr , &vseg ) ) 78 75 { 79 76 … … 89 86 if( user_attr != NULL ) 90 87 { 91 error = vmm_get_vseg( process , (intptr_t)user_attr , &vseg ); 92 93 if( error ) 88 if( vmm_get_vseg( process , (intptr_t)user_attr , &vseg ) ) 94 89 { 95 90 … … 102 97 } 103 98 104 hal_copy_from_uspace( local_cxy, 105 &kern_attr, 99 hal_copy_from_uspace( XPTR( local_cxy , &kern_attr ), 106 100 user_attr, 107 101 sizeof(pthread_attr_t) ); … … 109 103 110 104 // check start_func in user space 111 error = vmm_get_vseg( process , (intptr_t)start_func , &vseg ); 112 113 if( error ) 105 if( vmm_get_vseg( process , (intptr_t)start_func , &vseg ) ) 114 106 { 115 107 … … 125 117 if( start_args != NULL ) 126 118 { 127 error = vmm_get_vseg( process , (intptr_t)start_args , &vseg ); 128 129 if( error ) 119 if( vmm_get_vseg( process , (intptr_t)start_args , &vseg ) ) 130 120 { 131 121 … … 145 135 if( kern_attr.attributes & PT_ATTR_CLUSTER_DEFINED ) 146 136 { 147 if( cluster_is_ undefined( kern_attr.cxy ))137 if( cluster_is_active( kern_attr.cxy ) == false ) 148 138 { 149 139 … … 159 149 else 160 150 { 161 child_cxy = dqdt_get_cluster_for_ process();151 child_cxy = dqdt_get_cluster_for_thread( LOCAL_CLUSTER->dqdt_root_xp ); 162 152 } 163 153 } … … 165 155 { 166 156 kern_attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED; 167 child_cxy = dqdt_get_cluster_for_process();157 child_cxy = dqdt_get_cluster_for_thread( LOCAL_CLUSTER->dqdt_root_xp ); 168 158 } 169 159 … … 209 199 // returns trdid to user space 210 200 trdid = hal_remote_l32( XPTR( child_cxy , &child_ptr->trdid ) ); 211 hal_copy_to_uspace( local_cxy, 212 &trdid, 213 trdid_ptr, 201 hal_copy_to_uspace( trdid_ptr, 202 XPTR( local_cxy , &trdid ), 214 203 sizeof(pthread_t) ); 215 204 -
trunk/kernel/syscalls/sys_thread_detach.c
r566 r637 2 2 * sys_thread_detach.c - detach a joinable thread 3 3 * 4 * Authors Alain Greiner (2016,2017 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 * Copyright (c) 2011,2012UPMC Sorbonne Universites6 * Copyright (c) UPMC Sorbonne Universites 7 7 * 8 8 * This file is part of ALMOS-MKH. … … 48 48 49 49 // check trdid argument 50 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || cluster_is_undefined( target_cxy ) ) 50 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 51 (cluster_is_active( target_cxy ) == false) ) 51 52 { 52 53 printk("\n[ERROR] in %s : illegal trdid argument\n", __FUNCTION__ ); -
trunk/kernel/syscalls/sys_thread_join.c
r633 r637 2 2 * sys_thread_join.c - passive wait on the end of a given thread. 3 3 * 4 * Authors Alain Greiner (2016,2017 )5 * 6 * Copyright (c) 2011,2012UPMC Sorbonne Universites4 * Authors Alain Greiner (2016,2017,2018,2019) 5 * 6 * Copyright (c) UPMC Sorbonne Universites 7 7 * 8 8 * This file is part of ALMOS-MKH. … … 72 72 73 73 // check trdid argument 74 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || cluster_is_undefined(target_cxy) ) 74 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 75 (cluster_is_active(target_cxy) == false) ) 75 76 { 76 77 -
trunk/kernel/syscalls/sys_thread_wakeup.c
r566 r637 1 1 /* 2 * sys_thread_wakeup.c - wakeup all indicated threads2 * sys_thread_wakeup.c - wakeup indicated thread 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 36 36 process_t * process = this->process; 37 37 38 #if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS) 39 uint64_t tm_start = hal_get_cycles(); 40 #endif 41 38 42 #if DEBUG_SYS_THREAD_WAKEUP 39 uint64_t tm_start;40 uint64_t tm_end;41 tm_start = hal_get_cycles();42 43 if( DEBUG_SYS_THREAD_WAKEUP < tm_start ) 43 printk("\n[ DBG] %s :thread %x in process enter to activate thread %x / cycle %d\n",44 printk("\n[%s] thread %x in process enter to activate thread %x / cycle %d\n", 44 45 __FUNCTION__, this->trdid, process->pid, trdid, (uint32_t)tm_start ); 45 46 #endif … … 50 51 51 52 // check trdid argument 52 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || cluster_is_undefined( target_cxy ) ) 53 if( (target_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 54 (cluster_is_active( target_cxy ) == false) ) 53 55 { 54 56 … … 78 80 thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL ); 79 81 82 #if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS) 83 uint64_t tm_end = hal_get_cycles(); 84 #endif 85 86 80 87 #if DEBUG_SYS_THREAD_WAKEUP 81 tm_end = hal_get_cycles();82 88 if( DEBUG_SYS_THREAD_WAKEUP < tm_end ) 83 printk("\n[ DBG] %s :thread %x in process %x exit / thread %x activated / cycle %d\n",89 printk("\n[%s] thread %x in process %x exit / thread %x activated / cycle %d\n", 84 90 __FUNCTION__ , this->trdid, process->pid, trdid, (uint32_t)tm_end ); 91 #endif 92 93 #if CONFIG_INSTRUMENTATION_SYSCALLS 94 hal_atomic_add( &syscalls_cumul_cost[SYS_THREAD_WAKEUP] , tm_end - tm_start ); 95 hal_atomic_add( &syscalls_occurences[SYS_THREAD_WAKEUP] , 1 ); 85 96 #endif 86 97 -
trunk/kernel/syscalls/sys_timeofday.c
r635 r637 50 50 process_t * process = this->process; 51 51 52 #if (DEBUG_SYS_TIMEOFDAY || CONFIG_INSTRUMENTATION_SYSCALLS) 53 uint64_t tm_start = hal_get_cycles(); 54 #endif 55 56 #if DEBUG_SYS_TIMEOFDAY 57 if( DEBUG_SYS_TIMEOFDAY < tm_start ) 58 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 59 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start ); 60 #endif 61 52 62 // check tz (non supported / must be null) 53 63 if( tz ) … … 82 92 83 93 // copy values to user space 84 hal_copy_to_uspace( local_cxy, 85 &k_tv, 86 tv, 94 hal_copy_to_uspace( tv, 95 XPTR( local_cxy , &k_tv ), 87 96 sizeof(struct timeval) ); 88 97 89 98 hal_fence(); 90 99 100 #if (DEBUG_SYS_TIMEOFDAY || CONFIG_INSTRUMENTATION_SYSCALLS) 101 uint64_t tm_end = hal_get_cycles(); 102 #endif 103 104 #if DEBUG_SYS_TIMEOFDAY 105 if( DEBUG_SYS_TIMEOFDAY < tm_end ) 106 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 107 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 108 #endif 109 110 #if CONFIG_INSTRUMENTATION_SYSCALLS 111 hal_atomic_add( &syscalls_cumul_cost[SYS_TIMEOFDAY] , tm_end - tm_start ); 112 hal_atomic_add( &syscalls_occurences[SYS_TIMEOFDAY] , 1 ); 113 #endif 114 91 115 return 0; 92 116 -
trunk/kernel/syscalls/sys_trace.c
r566 r637 2 2 * sys_trace.c - activate / desactivate the context switches trace for a given core 3 3 * 4 * Author Alain Greiner (c) (2016,2017,2018 )4 * Author Alain Greiner (c) (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 40 40 process_t * process = this->process; 41 41 42 #if (DEBUG_SYS_TRACE || CONFIG_INSTRUMENTATION_SYSCALLS) 43 uint64_t tm_start = hal_get_cycles(); 44 #endif 45 42 46 #if DEBUG_SYS_TRACE 43 uint64_t tm_start;44 uint64_t tm_end;45 tm_start = hal_get_cycles();46 47 if( DEBUG_SYS_TRACE < tm_start ) 47 printk("\n[ DBG] %s : thread %d enter / process %x/ cycle = %d\n",48 __FUNCTION__, this , this->process->pid, (uint32_t)tm_start );48 printk("\n[%s] thread[%x,%x] enters / cycle = %d\n", 49 __FUNCTION__, this->process->pid, this->trdid, (uint32_t)tm_start ); 49 50 #endif 50 51 51 52 // check cluster identifier 52 if( cluster_is_ undefined( cxy ))53 if( cluster_is_active( cxy ) == false ) 53 54 { 54 55 … … 85 86 hal_fence(); 86 87 87 #if DEBUG_SYS_TRACE 88 tm_end = hal_get_cycles(); 89 if( DEBUG_SYS_TRACE < tm_end ) 90 printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n", 91 __FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end ); 88 #if (DEBUG_SYS_TRACE || CONFIG_INSTRUMENTATION_SYSCALLS) 89 uint64_t tm_end = hal_get_cycles(); 92 90 #endif 93 91 92 #if DEBUG_SYS_TRACE 93 if( DEBUG_SYS_TRACE < tm_end ) 94 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 95 __FUNCTION__, this->process->pid, this->trdid, (uint32_t)tm_end ); 96 #endif 97 98 #if CONFIG_INSTRUMENTATION_SYSCALLS 99 hal_atomic_add( &syscalls_cumul_cost[SYS_TRACE] , tm_end - tm_start ); 100 hal_atomic_add( &syscalls_occurences[SYS_TRACE] , 1 ); 101 #endif 94 102 return 0; 95 103 -
trunk/kernel/syscalls/sys_unlink.c
r610 r637 2 2 * sys_unlink.c - unlink a file or directorya from VFS 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 60 60 61 61 // copy pathname in kernel space 62 hal_strcpy_from_uspace( kbuf , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 62 hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ), 63 pathname, 64 CONFIG_VFS_MAX_PATH_LENGTH ); 63 65 64 66 #if DEBUG_SYS_UNLINK -
trunk/kernel/syscalls/sys_wait.c
r635 r637 53 53 pid_t pid = process->pid; 54 54 55 55 56 #if DEBUG_SYS_WAIT 56 uint64_t 57 uint64_t cycle = hal_get_cycles(); 57 58 if( DEBUG_SYS_WAIT < cycle ) 58 59 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", … … 153 154 #endif 154 155 // return child termination state to parent process 155 hal_copy_to_uspace( local_cxy, 156 &child_state, 157 status, 156 hal_copy_to_uspace( status, 157 XPTR( local_cxy , &child_state ), 158 158 sizeof(int) ); 159 159 return child_pid; … … 192 192 193 193 // never executed 194 return -1;194 return 0; 195 195 196 196 } // end sys_wait() -
trunk/kernel/syscalls/syscalls.h
r626 r637 210 210 /****************************************************************************************** 211 211 * [13] This function map physical memory (or a file) in the calling thread virtual space. 212 * The <attr> argument is a pointer on a structure for arguments (see shared_ syscalls.h).212 * The <attr> argument is a pointer on a structure for arguments (see shared_mman.h). 213 213 * The user defined virtual address (MAP_FIXED flag) is not supported. 214 214 * TODO : the access rights checking is not implemented yet [AG] … … 560 560 561 561 /****************************************************************************************** 562 * [41] This function implements the non-standard get_core () syscall.562 * [41] This function implements the non-standard get_core_id() syscall. 563 563 * It returns in <cxy> and <lid> the calling core cluster and local index. 564 564 ****************************************************************************************** … … 567 567 * @ return 0 if success / return -1 if illegal arguments 568 568 *****************************************************************************************/ 569 int sys_get_core ( uint32_t * cxy,570 uint32_t * lid );569 int sys_get_core_id( uint32_t * cxy, 570 uint32_t * lid ); 571 571 572 572 /****************************************************************************************** … … 696 696 int sys_fsync( uint32_t file_id ); 697 697 698 /****************************************************************************************** 699 * [53] This function implements the non-standard "get_best_core" syscall. 700 * It selects, in a macro-cluster specified by the <base_cxy> and <level> arguments, 701 * the core that has the lowest load. 702 * When an active core has been found in the target macro-cluster, it writes into the 703 * <cxy> and <lid> buffers the cluster identifier and the core local index, and return 0. 704 * It returns -1 in case of illegal arguments (level / cxy / lid). 705 * It returns +1 if there is no active core in specified macro-cluster. 706 ****************************************************************************************** 707 * @ base_cxy : [in] any cluster identifier in macro-cluster. 708 * @ level : [in] macro-cluster level in [1,2,3,4,5]. 709 * @ cxy : [out] selected core cluster identifier. 710 * @ lid : [out] selected core local index in cluster. 711 * @ return 0 if success / -1 if illegal arguments / +1 if no core in macro-clusters. 712 *****************************************************************************************/ 713 int sys_get_best_core( uint32_t base_cxy, 714 uint32_t level, 715 uint32_t * cxy, 716 uint32_t * lid ); 717 718 /****************************************************************************************** 719 * [54] This function implements the non-standard "get_nb_cores" syscall. 720 * It writes in the <ncores> buffer the number of cores in the target cluster <cxy>. 721 ****************************************************************************************** 722 * @ cxy : [in] target cluster identifier. 723 * @ ncores : [out] number of cores / 0 if cluster cxy undefined in architecture. 724 * @ return 0 if success / return -1 if illegal "ncores" arguments. 725 *****************************************************************************************/ 726 int sys_get_nb_cores( uint32_t cxy, 727 uint32_t * ncores ); 728 698 729 #endif // _SYSCALLS_H_
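The two prototypes above only define the kernel-side handlers for syscalls [53] and [54]. A hedged usage sketch written against the documented contracts, assuming user-level wrappers get_best_core() and get_nb_cores() with matching signatures (these wrapper names are an assumption and do not appear in this changeset):

    #include <stdio.h>
    #include <stdint.h>

    // assumed user-level wrappers for syscalls [53] and [54] (not part of this changeset)
    extern int get_best_core( uint32_t base_cxy , uint32_t level , uint32_t * cxy , uint32_t * lid );
    extern int get_nb_cores ( uint32_t cxy , uint32_t * ncores );

    int main( void )
    {
        uint32_t cxy;        // selected cluster identifier
        uint32_t lid;        // selected core local index in cluster
        uint32_t ncores;     // number of cores in the selected cluster

        // least loaded core in the level 2 macro-cluster containing cluster 0
        int ret = get_best_core( 0 , 2 , &cxy , &lid );

        if( ret == 0 )                       // success
        {
            get_nb_cores( cxy , &ncores );
            printf("best core [%x,%u] / %u cores in this cluster\n", cxy , lid , ncores );
        }
        else if( ret > 0 )                   // no active core in this macro-cluster
        {
            printf("no active core found\n");
        }
        else                                 // illegal arguments
        {
            printf("illegal arguments\n");
        }
        return 0;
    }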