Changeset 683 for trunk/kernel/libk

- Timestamp: Jan 13, 2021, 12:36:17 AM (4 years ago)
- Location: trunk/kernel/libk
- Files: 14 edited

Overall, this changeset migrates the libk sources from the kmem_req_t request
structure to the order/flags based kmem_alloc() / kmem_free() interface (and their
kmem_remote_* variants), renames the RPC FIFO configuration parameters, replaces
CONFIG_PPM_PAGE_SHIFT with CONFIG_PPM_PAGE_ORDER, adds the remote_buf_display()
debug function, and refines several asserts and debug messages.
trunk/kernel/libk/elf.c (r671 → r683)

- The code_vpn_base and data_vpn_base fields of the process VMM are now computed
  with vbase >> CONFIG_PPM_PAGE_ORDER instead of CONFIG_PPM_PAGE_SHIFT.
- The kmem_req_t local variable is removed: the segment descriptors array is now
  allocated with kmem_alloc( bits_log2(segs_size) , AF_NONE ) and released with
  kmem_free( segs_base , bits_log2(segs_size) ) on the vfs_lseek error path, the
  read error path, the register error path, and the normal exit path
  (see the before/after sketch below).
- The "cannot register segments descriptors" error path now prints an explicit
  error message before releasing the buffer and returning -1.
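The recurring change in this changeset is visible here: the kmem_req_t request
structure disappears, and the allocation order (log2 of the size in bytes) plus the
AF_* flags are passed directly to the allocator. A minimal before/after sketch,
assembled from the fragments shown in this diff (the kmem.h declarations themselves
are assumed, not shown here):

    /* r671 : allocation request packed in a kmem_req_t */
    kmem_req_t req;
    req.type  = KMEM_KCM;                   /* small-object allocator    */
    req.order = bits_log2( segs_size );     /* log2 of the size in bytes */
    req.flags = AF_KERNEL;
    segs_base = kmem_alloc( &req );
    /* ... */
    req.ptr = segs_base;
    kmem_free( &req );

    /* r683 : order and flags passed directly / order repeated at release */
    segs_base = kmem_alloc( bits_log2( segs_size ) , AF_NONE );
    /* ... */
    kmem_free( segs_base , bits_log2( segs_size ) );

The release call must now receive the same order as the allocation call, since the
allocator no longer keeps that information in a request descriptor.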
trunk/kernel/libk/grdxt.c (r671 → r683)

- The local and remote init functions now start with an assert checking that the
  radix-tree pointer argument is not NULL; grdxt_destroy() and
  grdxt_remote_destroy() get the same check, moved to the top of the function.
- All level-1 / level-2 / level-3 array allocations and releases drop the
  kmem_req_t request structure: the order is computed as
  ixN_width + ((sizeof(void*) == 4) ? 2 : 3) and passed directly to kmem_alloc() /
  kmem_remote_alloc() with AF_ZERO, and to kmem_free() / kmem_remote_free() when
  the arrays are released (see the sketch below). The AF_KERNEL | AF_ZERO flags of
  r671 become AF_ZERO.
- The "illegal key value" asserts are split on two lines (formatting only).
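The order passed to the allocator for a radix-tree level array is the index width
plus log2(sizeof(void*)) — +2 on a 32-bit build, +3 on a 64-bit build — so that the
array holds 2^ix_width pointers. A short sketch of the pattern now used for every
level, with the same order reused at release time (variable names are those visible
in the diff):

    /* allocation side : (1 << ix1_width) pointers, zeroed so empty slots read as NULL */
    uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
    void  ** root  = kmem_alloc( order , AF_ZERO );

    /* release side : the order is recomputed from the stored width */
    order = w1 + ( (sizeof(void*) == 4) ? 2 : 3 );
    kmem_free( ptr1 , order );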
trunk/kernel/libk/remote_barrier.c (r671 → r683)

- Author years extended to 2020 in the file header.
- Generic barrier creation: the generic_barrier_t descriptor is now allocated with
  kmem_remote_alloc( ref_cxy , bits_log2(sizeof(generic_barrier_t)) , AF_KERNEL ).
  The error handling is restructured: when the count != x_size * y_size * nthreads
  check fails, or when the simple / DQT implementation barrier cannot be created,
  the generic descriptor is released before returning -1, where r671 leaked it
  (see the sketch below).
- generic_barrier_destroy(): the descriptor is released with kmem_remote_free() in
  its own cluster (gen_barrier_cxy), passing bits_log2(sizeof(generic_barrier_t)).
- simple_barrier_create() / simple_barrier_destroy(): same migration to the
  order-based kmem_remote_alloc() / kmem_remote_free() interface, using
  bits_log2(sizeof(simple_barrier_t)).
- DQT barrier creation: the 16-Kbytes dqt_barrier_t descriptor is allocated with
  order CONFIG_PPM_PAGE_ORDER + 2 (4 small pages), and each per-cluster array of
  5 DQT nodes with order 9 (512 bytes); the mesh-size and descriptor-size asserts
  are reformatted on two lines, and a comment typo ("xptr o fnodes" → "xptr on
  nodes") is fixed.
- dqt_barrier_destroy(): the per-cluster node arrays are released with
  kmem_remote_free( cxy , buf , 9 ), the descriptor with order
  CONFIG_PPM_PAGE_ORDER + 2, and the assert on the dqt_nodes array cluster is
  removed.
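One functional fix hidden in the allocator migration: the error paths of the
generic barrier creation now release the generic descriptor allocated at the top of
the function, which r671 leaked when the implementation-specific barrier could not
be built. A simplified sketch of the r683 control flow, condensed from the diff
(intermediate variables and the attribute checks are elided):

    /* allocate the generic descriptor in the reference cluster */
    gen_barrier_ptr = kmem_remote_alloc( ref_cxy,
                                         bits_log2( sizeof(generic_barrier_t) ),
                                         AF_KERNEL );
    if( gen_barrier_ptr == NULL ) return -1;

    /* build the implementation-specific barrier (simple or DQT) */
    if( attr == NULL ) barrier = simple_barrier_create( count );
    else               barrier = dqt_barrier_create( x_size , y_size , nthreads );

    /* on failure, undo the first allocation before returning */
    if( barrier == NULL )
    {
        printk("\n[ERROR] in %s : cannot create impl barrier\n", __FUNCTION__ );
        kmem_remote_free( ref_cxy,
                          gen_barrier_ptr,
                          bits_log2( sizeof(generic_barrier_t) ) );
        return -1;
    }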
trunk/kernel/libk/remote_buf.c (r671 → r683)

- remote_buf_alloc() and remote_buf_destroy() allocate / release the remote_buf_t
  descriptor with the order-based interface, using bits_log2(sizeof(remote_buf_t)).
- The data-buffer allocation and release no longer select between KMEM_PPM and
  KMEM_KCM depending on the order: a single
  kmem_remote_alloc( buf_cxy , order , AF_NONE ) /
  kmem_remote_free( buf_cxy , data_ptr , order ) call is used (see the sketch
  below).
- New remote_buf_display() debug function: it copies up to 256 bytes of the remote
  data buffer to a local buffer, builds a header string with the buffer state
  (size, rid, wid, sts), and displays both on the TXT0 terminal with putb().
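Before this changeset the caller had to pick the underlying allocator itself: the
page allocator (KMEM_PPM) when the requested order was at least one page, the
small-object allocator (KMEM_KCM) otherwise. With the order-based interface a
single call covers both cases. The sketch below condenses the two versions of the
data-buffer allocation shown in this diff (flag assignments merged for brevity):

    /* r671 : the caller selects KMEM_PPM or KMEM_KCM according to the order */
    if( order >= CONFIG_PPM_PAGE_SHIFT )
    {
        req.type  = KMEM_PPM;
        req.order = order - CONFIG_PPM_PAGE_SHIFT;   /* order in pages          */
    }
    else
    {
        req.type  = KMEM_KCM;
        req.order = order;                           /* order = log2(bytes)     */
    }
    req.flags = AF_NONE;
    data = kmem_remote_alloc( buf_cxy , &req );

    /* r683 : one call, whatever the order; same order given back at release */
    data = kmem_remote_alloc( buf_cxy , order , AF_NONE );
    kmem_remote_free( buf_cxy , data , order );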
trunk/kernel/libk/remote_buf.h (r671 → r683)

- Added the prototype and documentation of the remote_buf_display() debug function,
  which displays on the kernel terminal the current state (order / rid / wid / sts)
  of the remote buffer identified by the <buf_xp> argument, and, when <nbytes> is
  not null (at most 256), the data bytes from <offset> to (offset + nbytes - 1).
  A usage sketch is given below.
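A hypothetical call site, just to illustrate the argument order of the new helper
(buf_xp stands for whatever extended pointer on a remote_buf_t the caller already
holds):

    /* dump the buffer state and its first 64 data bytes on the TXT0 terminal */
    remote_buf_display( __FUNCTION__ , buf_xp , 64 , 0 );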
trunk/kernel/libk/remote_condvar.c (r635 → r683)

- Author years extended to 2020 in the file header.
- Condvar creation: the remote_condvar_t descriptor is allocated with
  kmem_alloc( bits_log2(sizeof(remote_condvar_t)) , AF_ZERO ) instead of the
  kmem_req_t based interface.
- remote_condvar_destroy(): the descriptor is released with
  kmem_remote_free( ref_cxy , condvar_ptr , bits_log2(sizeof(remote_condvar_t)) ).
trunk/kernel/libk/remote_condvar.h (r635 → r683)

- Author years extended to 2020 in the file header.
trunk/kernel/libk/remote_fifo.c (r657 → r683)

- The CONFIG_REMOTE_FIFO_SLOTS and CONFIG_REMOTE_FIFO_MAX_ITERATIONS parameters are
  renamed CONFIG_RPC_FIFO_SLOTS and CONFIG_RPC_FIFO_MAX_ITERATIONS in the FIFO
  initialisation loop, in the writer watchdog loop (which still returns EBUSY after
  CONFIG_RPC_FIFO_MAX_ITERATIONS failures), in the read and write slot index
  computations, and in the "fifo full" test (see the sketch below).
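The renaming does not change the FIFO logic: occupancy is still derived from the
two free-running 32-bit indexes, and the slot actually accessed is the index modulo
the (renamed) slot count. A sketch reassembled from the fragments visible in this
diff (the first branch condition is inferred, since only the else branch appears
here):

    /* number of occupied slots, robust to wraparound of the 32-bit wr_id counter */
    uint32_t nslots;
    if( wr_id >= rd_id ) nslots = wr_id - rd_id;
    else                 nslots = (0xFFFFFFFF - rd_id) + wr_id;

    /* the FIFO is full when all CONFIG_RPC_FIFO_SLOTS slots are occupied */
    if( nslots >= CONFIG_RPC_FIFO_SLOTS ) { /* full : retry later */ }

    /* writers and readers address the circular buffer modulo the slot count */
    uint32_t ptw = wr_id % CONFIG_RPC_FIFO_SLOTS;   /* write slot */
    uint32_t ptr = rd_id % CONFIG_RPC_FIFO_SLOTS;   /* read slot  */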
trunk/kernel/libk/remote_fifo.h (r563 → r683)

- Same renaming as in remote_fifo.c: CONFIG_REMOTE_FIFO_SLOTS becomes
  CONFIG_RPC_FIFO_SLOTS in the structure definition (valid[] and data[] arrays) and
  in the comments (number of slots, footprint warning: each FIFO requires
  12 + (12 * CONFIG_RPC_FIFO_SLOTS) bytes), and CONFIG_REMOTE_FIFO_MAX_ITERATIONS
  becomes CONFIG_RPC_FIFO_MAX_ITERATIONS in the write-function comment.
trunk/kernel/libk/remote_mutex.c (r635 → r683)

- Author years extended to 2020 in the file header.
- Mutex creation: the remote_mutex_t descriptor is allocated in the reference
  cluster with kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_mutex_t)) ,
  AF_ZERO ).
- remote_mutex_destroy(): the descriptor is released with
  kmem_remote_free( mutex_cxy , mutex_ptr , bits_log2(sizeof(remote_mutex_t)) ).
trunk/kernel/libk/remote_sem.c (r671 → r683)

- Author years extended to 2020 in the file header.
- Semaphore creation: the remote_sem_t descriptor is allocated in the reference
  cluster with kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_sem_t)) ,
  AF_ZERO ).
- remote_sem_destroy(): the descriptor is released with
  kmem_remote_free( sem_cxy , sem_ptr , bits_log2(sizeof(remote_sem_t)) ).
trunk/kernel/libk/remote_sem.h (r581 → r683)

- Author years extended to 2019 and 2020 in the file header.
trunk/kernel/libk/user_dir.c (r671 → r683)

- Author years extended to 2020 in the file header.
- The single DEBUG_USER_DIR switch is split into DEBUG_USER_DIR_CREATE and
  DEBUG_USER_DIR_DESTROY thresholds for the trace messages, plus a
  DEBUG_USER_DIR_ERROR switch guarding the error messages, which now report the
  calling thread, the cluster and the cycle (see the sketch below).
- user_dir_create(): the kmem_req_t variable is removed; the user_dir_t descriptor
  is allocated with kmem_alloc( bits_log2(sizeof(user_dir_t)) , AF_ZERO ), and each
  physical page of the dirent array with kmem_alloc( CONFIG_PPM_PAGE_ORDER ,
  AF_ZERO ). The two loops (first building the dirent array as a temporary list of
  pages, then mapping these pages in the reference process GPT and removing them
  from the list) get clearer comments. The dir->ident field is computed with
  vpn_base << CONFIG_PPM_PAGE_ORDER instead of CONFIG_PPM_PAGE_SHIFT. The failure
  paths release the user_dir_t structure and the physical pages with the
  order-based kmem_free().
- In the user_dir destruction function, the user_dir_t structure is released with
  kmem_free( dir , bits_log2(sizeof(user_dir_t)) ), and the trace messages use the
  new DEBUG_USER_DIR_DESTROY threshold.
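The debug instrumentation now follows a per-operation pattern: one threshold per
entry point (CREATE / DESTROY) for the trace messages, and a separate ERROR switch
for the failure messages. A sketch of the guards as they appear in
user_dir_create() after this change, copied from the fragments in the diff:

    #if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR
    uint32_t   cycle = (uint32_t)hal_get_cycles();
    thread_t * this  = CURRENT_THREAD;
    #endif

    #if DEBUG_USER_DIR_CREATE
    if( DEBUG_USER_DIR_CREATE < cycle )
    printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
           __FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
    #endif

    /* on an allocation failure : */
    #if DEBUG_USER_DIR_ERROR
    printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate user_dir_t in cluster %x / cycle %d\n",
           __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
    #endif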
trunk/kernel/libk/user_dir.h (r651 → r683)

- Author years extended to 2020 in the file header.