Changeset 625 for trunk/kernel/kern/thread.c
- Timestamp: Apr 10, 2019, 10:09:39 AM
- File: trunk/kernel/kern/thread.c (1 edited)
trunk/kernel/kern/thread.c
--- trunk/kernel/kern/thread.c (r624)
+++ trunk/kernel/kern/thread.c (r625)

@@ -3 +3 @@
   *
   * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
-  *         Alain Greiner (2016,2017,2018)
+  *         Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites

@@ -29 +29 @@
  #include <hal_special.h>
  #include <hal_remote.h>
+ #include <hal_vmm.h>
  #include <memcpy.h>
  #include <printk.h>

@@ -96 +97 @@

  /////////////////////////////////////////////////////////////////////////////////////
- // This static function releases the physical memory for a thread descriptor.
- // It is called by the three functions:
- //   - thread_user_create()
- //   - thread_user_fork()
- //   - thread_kernel_create()
- /////////////////////////////////////////////////////////////////////////////////////
- // @ thread  : pointer on thread descriptor.
- /////////////////////////////////////////////////////////////////////////////////////
- static void thread_release( thread_t * thread )
- {
-     kmem_req_t  req;
-
-     xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
-
-     req.type = KMEM_PAGE;
-     req.ptr  = GET_PTR( base_xp );
-     kmem_free( &req );
- }
-
- /////////////////////////////////////////////////////////////////////////////////////
  // This static function initializes a thread descriptor (kernel or user).
  // It can be called by the four functions:
@@ -122 +103 @@
  //   - thread_kernel_create()
  //   - thread_idle_init()
+ // The "type" and "trdid" fields must have been previously set.
  // It updates the local DQDT.
  /////////////////////////////////////////////////////////////////////////////////////
- // @ thread       : pointer on local thread descriptor
- // @ process      : pointer on local process descriptor.
- // @ type         : thread type.
- // @ func         : pointer on thread entry function.
- // @ args         : pointer on thread entry function arguments.
- // @ core_lid     : target core local index.
- // @ u_stack_base : stack base (user thread only)
- // @ u_stack_size : stack size (user thread only)
+ // @ thread          : pointer on local thread descriptor
+ // @ process         : pointer on local process descriptor.
+ // @ type            : thread type.
+ // @ trdid           : thread identifier
+ // @ func            : pointer on thread entry function.
+ // @ args            : pointer on thread entry function arguments.
+ // @ core_lid        : target core local index.
+ // @ user_stack_vseg : local pointer on user stack vseg (user thread only)
  /////////////////////////////////////////////////////////////////////////////////////
  static error_t thread_init( thread_t      * thread,
                              process_t     * process,
                              thread_type_t   type,
+                             trdid_t         trdid,
                              void          * func,
                              void          * args,
                              lid_t           core_lid,
-                             intptr_t        u_stack_base,
-                             uint32_t        u_stack_size )
- {
-     error_t        error;
-     trdid_t        trdid;      // allocated thread identifier
-
-     cluster_t    * local_cluster = LOCAL_CLUSTER;
+                             vseg_t        * user_stack_vseg )
+ {
+
+     // check type and trdid fields initialized
+     assert( (thread->type == type)   , "bad type argument" );
+     assert( (thread->trdid == trdid) , "bad trdid argument" );

  #if DEBUG_THREAD_INIT
@@ -152 +134 @@
  if( DEBUG_THREAD_INIT < cycle )
  printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, thread , process->pid , cycle );
+ __FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid , cycle );
  #endif

@@ -159 +141 @@

      // Initialize new thread descriptor
-     thread->type            = type;
      thread->quantum         = 0;            // TODO
      thread->ticks_nr        = 0;            // TODO
      thread->time_last_check = 0;            // TODO
-     thread->core            = &local_cluster->core_tbl[core_lid];
+     thread->core            = &LOCAL_CLUSTER->core_tbl[core_lid];
      thread->process         = process;
-
      thread->busylocks       = 0;

@@ -172 +152 @@
  #endif

-     thread->u_stack_base = u_stack_base;
-     thread->u_stack_size = u_stack_size;
+     thread->user_stack_vseg = user_stack_vseg;
      thread->k_stack_base = (intptr_t)thread + desc_size;
      thread->k_stack_size = CONFIG_THREAD_DESC_SIZE - desc_size;
-
      thread->entry_func      = func;         // thread entry point
      thread->entry_args      = args;         // thread function arguments
@@ -185 +163 @@
      thread->blocked         = THREAD_BLOCKED_GLOBAL;

-     // register new thread in process descriptor, and get a TRDID
-     error = process_register_thread( process, thread , &trdid );
-
-     if( error )
-     {
-         printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"
-                "    for thread %s in process %x / cycle %d\n",
-                __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
-                local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );
-         return EINVAL;
-     }
-
-     // initialize trdid
-     thread->trdid = trdid;
-
      // initialize sched list
      list_entry_init( &thread->sched_list );
@@ -237 +200 @@
  }  // end thread_init()

- ///////////////////////////////////////////////////////
+ //////////////////////////////////////////////////
  error_t thread_user_create( pid_t             pid,
                              void            * start_func,
@@ -246 +209 @@
      error_t      error;
      thread_t   * thread;       // pointer on created thread descriptor
+     trdid_t      trdid;        // created thread identifier
      process_t  * process;      // pointer to local process descriptor
      lid_t        core_lid;     // selected core local index
-     vseg_t     * vseg;         // stack vseg
+     vseg_t     * us_vseg;      // user stack vseg

      assert( (attr != NULL) , "pthread attributes must be defined" );
@@ -266 +230 @@
      {
          printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
-                __FUNCTION__ , pid );
-         return ENOMEM;
+         __FUNCTION__ , pid );
+         return -1;
      }

@@ -284 +248 @@
          printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
          __FUNCTION__ , core_lid );
-         return EINVAL;
+         return -1;
      }
      }
@@ -298 +262 @@
  #endif

-     // allocate a stack from local VMM
-     vseg = vmm_create_vseg( process,
-                             VSEG_TYPE_STACK,
-                             0,                 // size unused
-                             0,                 // length unused
-                             0,                 // file_offset unused
-                             0,                 // file_size unused
-                             XPTR_NULL,         // mapper_xp unused
-                             local_cxy );
-
-     if( vseg == NULL )
-     {
-         printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
-         return ENOMEM;
-     }
-
- #if( DEBUG_THREAD_USER_CREATE & 1)
- if( DEBUG_THREAD_USER_CREATE < cycle )
- printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
- __FUNCTION__, vseg->vpn_base, vseg->vpn_size );
- #endif
-
      // allocate memory for thread descriptor
      thread = thread_alloc();
@@ -325 +267 @@
      if( thread == NULL )
      {
-         printk("\n[ERROR] in %s : cannot create new thread \n", __FUNCTION__ );
-         vmm_delete_vseg( process->pid , vseg->min );
-         return ENOMEM;
+         printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n",
+         __FUNCTION__, local_cxy );
+         return -1;
      }

@@ -336 +278 @@
  #endif

+     // set type in thread descriptor
+     thread->type = THREAD_USER;
+
+     // register new thread in process descriptor, and get a TRDID
+     error = process_register_thread( process, thread , &trdid );
+
+     if( error )
+     {
+         printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
+         __FUNCTION__, pid );
+         thread_destroy( thread );
+         return -1;
+     }
+
+     // set trdid in thread descriptor
+     thread->trdid = trdid;
+
+ #if( DEBUG_THREAD_USER_CREATE & 1)
+ if( DEBUG_THREAD_USER_CREATE < cycle )
+ printk("\n[%s] new thread %x registered in process %x\n",
+ __FUNCTION__, trdid, pid );
+ #endif
+
+     // allocate a stack from local VMM
+     us_vseg = vmm_create_vseg( process,
+                                VSEG_TYPE_STACK,
+                                LTID_FROM_TRDID( trdid ),
+                                0,                         // size unused
+                                0,                         // file_offset unused
+                                0,                         // file_size unused
+                                XPTR_NULL,                 // mapper_xp unused
+                                local_cxy );
+
+     if( us_vseg == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
+         process_remove_thread( thread );
+         thread_destroy( thread );
+         return -1;
+     }
+
+ #if( DEBUG_THREAD_USER_CREATE & 1)
+ if( DEBUG_THREAD_USER_CREATE < cycle )
+ printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
+ __FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size );
+ #endif
+
      // initialize thread descriptor
      error = thread_init( thread,
                           process,
                           THREAD_USER,
+                          trdid,
                           start_func,
                           start_arg,
                           core_lid,
-                          vseg->min,
-                          vseg->max - vseg->min );
+                          us_vseg );
      if( error )
      {
          printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
-         vmm_delete_vseg( process->pid , vseg->min );
-         thread_release( thread );
-         return EINVAL;
+         vmm_remove_vseg( process , us_vseg );
+         process_remove_thread( thread );
+         thread_destroy( thread );
+         return -1;
      }

  #if( DEBUG_THREAD_USER_CREATE & 1)
  if( DEBUG_THREAD_USER_CREATE < cycle )
- printk("\n[%s] new thread descriptor initialised / trdid %x\n",
- __FUNCTION__, thread->trdid );
+ printk("\n[%s] new thread %x in process %x initialised\n",
+ __FUNCTION__, thread->trdid, process->pid );
  #endif

@@ -369 +359 @@
      {
          printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
-         vmm_delete_vseg( process->pid , vseg->min );
-         thread_release( thread );
-         return ENOMEM;
+         vmm_remove_vseg( process , us_vseg );
+         process_remove_thread( thread );
+         thread_destroy( thread );
+         return -1;
      }
      hal_cpu_context_init( thread );
@@ -379 +370 @@
      {
          printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
-         vmm_delete_vseg( process->pid , vseg->min );
-         thread_release( thread );
-         return ENOMEM;
+         vmm_remove_vseg( process , us_vseg );
+         process_remove_thread( thread );
+         thread_destroy( thread );
+         return -1;
      }
      hal_fpu_context_init( thread );
@@ -410 +402 @@
  {
      error_t        error;
-     thread_t     * child_ptr;        // local pointer on local child thread
+     thread_t     * child_ptr;        // local pointer on child thread
+     trdid_t        child_trdid;      // child thread identifier
      lid_t          core_lid;         // selected core local index
-
      thread_t     * parent_ptr;       // local pointer on remote parent thread
      cxy_t          parent_cxy;       // parent thread cluster
      process_t    * parent_process;   // local pointer on parent process
      xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
-
-     void         * func;             // parent thread entry_func
-     void         * args;             // parent thread entry_args
-     intptr_t       base;             // parent thread u_stack_base
-     uint32_t       size;             // parent thread u_stack_size
-     uint32_t       flags;            // parent_thread flags
-     vpn_t          vpn_base;         // parent thread stack vpn_base
-     vpn_t          vpn_size;         // parent thread stack vpn_size
-     reg_t        * uzone;            // parent thread pointer on uzone
-
-     vseg_t       * vseg;             // child thread STACK vseg
+     void         * parent_func;      // parent thread entry_func
+     void         * parent_args;      // parent thread entry_args
+     uint32_t       parent_flags;     // parent_thread flags
+     vseg_t       * parent_us_vseg;   // parent thread user stack vseg
+     vseg_t       * child_us_vseg;    // child thread user stack vseg

  #if DEBUG_THREAD_USER_FORK
@@ -433 +419 @@
      thread_t * this = CURRENT_THREAD;
      if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] enter / child_process %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
  #endif
@@ -439 +425 @@
      // select a target core in local cluster
      core_lid = cluster_select_local_core();
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n",
+ __FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid );
+ #endif

      // get cluster and local pointer on parent thread descriptor
@@ -444 +436 @@
      parent_ptr = GET_PTR( parent_thread_xp );

-     // get relevant fields from parent thread
-     func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func ));
-     args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args ));
-     base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base ));
-     size  = (uint32_t)hal_remote_l32( XPTR( parent_cxy , &parent_ptr->u_stack_size ));
-     flags =           hal_remote_l32( XPTR( parent_cxy , &parent_ptr->flags ));
-     uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
-
-     vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
-     vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;
+     // get relevant infos from parent thread
+     parent_func    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func ));
+     parent_args    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args ));
+     parent_flags   = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags ));
+     parent_us_vseg = (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg ));

      // get pointer on parent process in parent thread cluster
@@ -459 +446 @@
                                               &parent_ptr->process ) );

-     // get extended pointer on parent GPT in parent thread cluster
+     // build extended pointer on parent GPT in parent thread cluster
      parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] get parent GPT\n",
+ __FUNCTION__, this->process->pid, this->trdid );
+ #endif

      // allocate memory for child thread descriptor
      child_ptr = thread_alloc();
+
      if( child_ptr == NULL )
      {
-         printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
+         printk("\n[ERROR] in %s : cannot allocate new thread\n",
+         __FUNCTION__ );
          return -1;
      }
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, child_ptr );
+ #endif
+
+     // set type in thread descriptor
+     child_ptr->type = THREAD_USER;
+
+     // register new thread in process descriptor, and get a TRDID
+     error = process_register_thread( child_process, child_ptr , &child_trdid );
+
+     if( error )
+     {
+         printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
+         __FUNCTION__, child_process->pid );
+         thread_destroy( child_ptr );
+         return -1;
+     }
+
+     // set trdid in thread descriptor
+     child_ptr->trdid = child_trdid;
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid );
+ #endif
+
+     // get an user stack vseg from local VMM allocator
+     child_us_vseg = vmm_create_vseg( child_process,
+                                      VSEG_TYPE_STACK,
+                                      LTID_FROM_TRDID( child_trdid ),
+                                      0,                               // size unused
+                                      0,                               // file_offset unused
+                                      0,                               // file_size unused
+                                      XPTR_NULL,                       // mapper_xp unused
+                                      local_cxy );
+     if( child_us_vseg == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
+         process_remove_thread( child_ptr );
+         thread_destroy( child_ptr );
+         return -1;
+     }
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] created an user stack vseg / vpn_base %x / %d pages\n",
+ __FUNCTION__, this->process->pid, this->trdid,
+ child_us_vseg->vpn_base, child_us_vseg->vpn_size );
+ #endif

      // initialize thread descriptor
@@ -474 +522 @@
                           child_process,
                           THREAD_USER,
-                          func,
-                          args,
+                          child_trdid,
+                          parent_func,
+                          parent_args,
                           core_lid,
-                          base,
-                          size );
+                          child_us_vseg );
      if( error )
      {
          printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
-         thread_release( child_ptr );
-         return EINVAL;
+         vmm_remove_vseg( child_process , child_us_vseg );
+         process_remove_thread( child_ptr );
+         thread_destroy( child_ptr );
+         return -1;
      }

@@ -492 +542 @@
  #endif

-     // return child pointer
-     *child_thread = child_ptr;
-
      // set detached flag if required
-     if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
-
-     // update uzone pointer in child thread descriptor
-     child_ptr->uzone_current = (char *)((intptr_t)uzone +
-                                         (intptr_t)child_ptr -
-                                         (intptr_t)parent_ptr );
-
-
-     // allocate CPU context for child thread
+     if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
+
+     // allocate a CPU context for child thread
      if( hal_cpu_context_alloc( child_ptr ) )
      {
          printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
-         thread_release( child_ptr );
+         vmm_remove_vseg( child_process , child_us_vseg );
+         process_remove_thread( child_ptr );
+         thread_destroy( child_ptr );
          return -1;
      }

-     // allocate FPU context for child thread
+     // allocate a FPU context for child thread
      if( hal_fpu_context_alloc( child_ptr ) )
      {
          printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
-         thread_release( child_ptr );
+         vmm_remove_vseg( child_process , child_us_vseg );
+         process_remove_thread( child_ptr );
+         thread_destroy( child_ptr );
          return -1;
      }
@@ -526 +571 @@
  #endif

-     // create and initialize STACK vseg
-     vseg = vseg_alloc();
-     vseg_init( vseg,
-                VSEG_TYPE_STACK,
-                base,
-                size,
-                vpn_base,
-                vpn_size,
-                0, 0, XPTR_NULL,      // not a file vseg
-                local_cxy );
-
-     // register STACK vseg in local child VSL
-     vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );
-
- #if (DEBUG_THREAD_USER_FORK & 1)
- if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] created stack vseg for thread %x in process %x\n",
- __FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
- #endif
-
-     // copy all valid STACK GPT entries
-     vpn_t          vpn;
-     bool_t         mapped;
-     ppn_t          ppn;
-     for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
+     // scan parent GPT, and copy all valid entries
+     // associated to user stack vseg into child GPT
+     vpn_t          parent_vpn;
+     vpn_t          child_vpn;
+     bool_t         mapped;
+     ppn_t          ppn;
+     vpn_t          parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) );
+     vpn_t          parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
+     vpn_t          child_vpn_base  = child_us_vseg->vpn_base;
+     for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ;
+          parent_vpn < (parent_vpn_base + parent_vpn_size) ;
+          parent_vpn++ , child_vpn++ )
      {
          error = hal_gpt_pte_copy( &child_process->vmm.gpt,
+                                   child_vpn,
                                    parent_gpt_xp,
-                                   vpn,
+                                   parent_vpn,
                                    true,                 // set cow
                                    &ppn,
@@ -560 +593 @@
          if( error )
          {
-             vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
-             thread_release( child_ptr );
              printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
+             vmm_remove_vseg( child_process , child_us_vseg );
+             process_remove_thread( child_ptr );
+             thread_destroy( child_ptr );
              return -1;
          }

-         // increment pending forks counter for the page if mapped
+         // increment pending forks counter for a mapped page
          if( mapped )
          {
@@ -574 +608 @@
              page_t * page_ptr = GET_PTR( page_xp );

-             // get extended pointers on forks and lock fields
+             // build extended pointers on forks and lock fields
              xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
              xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
@@ -586 +620 @@
              // release lock protecting page
              remote_busylock_release( lock_xp );
+         }
+     }

  #if (DEBUG_THREAD_USER_FORK & 1)
- cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] copied one PTE to child GPT : vpn %x / forks %d\n",
- __FUNCTION__, this->process->pid, this->trdid,
- vpn, hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
- #endif
-
-         }
-     }
-
-     // set COW flag for all mapped entries of STAK vseg in parent thread GPT
+ printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs to child GPT\n",
+ __FUNCTION__, this->process->pid, this->trdid );
+ #endif
+
+     // set COW flag for all mapped entries of user stack vseg in parent GPT
      hal_gpt_set_cow( parent_gpt_xp,
-                      vpn_base,
-                      vpn_size );
-
+                      parent_vpn_base,
+                      parent_vpn_size );
+
+ #if (DEBUG_THREAD_USER_FORK & 1)
+ if( DEBUG_THREAD_USER_FORK < cycle )
+ printk("\n[%s] thread[%x,%x] set the COW flag for stack vseg in parent GPT\n",
+ __FUNCTION__, this->process->pid, this->trdid );
+ #endif
+
+     // return child pointer
+     *child_thread = child_ptr;
+
  #if DEBUG_THREAD_USER_FORK
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] exit / child_thread %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, child_ptr, cycle );
+ printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid,
+ child_ptr->process->pid, child_ptr->trdid, cycle );
  #endif

@@ -660 +701 @@

      // allocate an user stack vseg for main thread
-     vseg_t * vseg = vmm_create_vseg( process,
-                                      VSEG_TYPE_STACK,
-                                      0,                 // size unused
-                                      0,                 // length unused
-                                      0,                 // file_offset unused
-                                      0,                 // file_size unused
-                                      XPTR_NULL,         // mapper_xp unused
-                                      local_cxy );
-     if( vseg == NULL )
+     vseg_t * us_vseg = vmm_create_vseg( process,
+                                         VSEG_TYPE_STACK,
+                                         LTID_FROM_TRDID( thread->trdid ),
+                                         0,                          // length unused
+                                         0,                          // file_offset unused
+                                         0,                          // file_size unused
+                                         XPTR_NULL,                  // mapper_xp unused
+                                         local_cxy );
+     if( us_vseg == NULL )
      {
          printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ );
@@ -675 +716 @@

      // update user stack in thread descriptor
-     thread->u_stack_base = vseg->min;
-     thread->u_stack_size = vseg->max - vseg->min;
+     thread->user_stack_vseg = us_vseg;

      // release FPU ownership if required
@@ -710 +750 @@
      error_t        error;
      thread_t     * thread;       // pointer on new thread descriptor
+     trdid_t        trdid;        // new thread identifier

      thread_t * this = CURRENT_THREAD;
@@ -737 +778 @@
      }

+     // set type in thread descriptor
+     thread->type = type;
+
+     // register new thread in local kernel process descriptor, and get a TRDID
+     error = process_register_thread( &process_zero , thread , &trdid );
+
+     if( error )
+     {
+         printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ );
+         return -1;
+     }
+
+     // set trdid in thread descriptor
+     thread->trdid = trdid;
+
      // initialize thread descriptor
      error = thread_init( thread,
                           &process_zero,
                           type,
+                          trdid,
                           func,
                           args,
                           core_lid,
-                          0 , 0 );   // no user stack for a kernel thread
+                          NULL );    // no user stack for a kernel thread

      if( error ) // release allocated memory for thread descriptor
      {
-         printk("\n[ERROR] in %s : thread %x in process %x\n"
-                "    cannot initialize thread descriptor\n",
-                __FUNCTION__, this->trdid, this->process->pid );
-         thread_release( thread );
+         printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ );
+         thread_destroy( thread );
          return ENOMEM;
      }
@@ -763 +818 @@
                 "    cannot create CPU context\n",
                 __FUNCTION__, this->trdid, this->process->pid );
-         thread_release( thread );
+         thread_destroy( thread );
          return EINVAL;
      }
@@ -791 +846 @@
                          lid_t      core_lid )
  {
+     trdid_t    trdid;
+     error_t    error;

      // check arguments
@@ -796 +853 @@
      assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );

+     // set type in thread descriptor
+     thread->type = THREAD_IDLE;
+
+     // register idle thread in local kernel process descriptor, and get a TRDID
+     error = process_register_thread( &process_zero , thread , &trdid );
+
+     assert( (error == 0), "cannot register idle_thread in kernel process" );
+
+     // set trdid in thread descriptor
+     thread->trdid = trdid;
+
      // initialize thread descriptor
-     error_t error = thread_init( thread,
-                                  &process_zero,
-                                  type,
-                                  func,
-                                  args,
-                                  core_lid,
-                                  0 , 0 );   // no user stack for a kernel thread
-
-     assert( (error == 0), "cannot create thread idle" );
+     error = thread_init( thread,
+                          &process_zero,
+                          THREAD_IDLE,
+                          trdid,
+                          func,
+                          args,
+                          core_lid,
+                          NULL );    // no user stack for a kernel thread
+
+     assert( (error == 0), "cannot initialize idle_thread" );

      // allocate & initialize CPU context if success
      error = hal_cpu_context_alloc( thread );

      assert( (error == 0), "cannot allocate CPU context" );

      hal_cpu_context_init( thread );
@@ -816 +885 @@
  }  // end thread_idle_init()

- ///////////////////////////////////////////////////////////////////////////////////////
- // TODO: check that all memory dynamically allocated during thread execution
- // has been released => check vmm destroy for MMAP vsegs [AG]
- ///////////////////////////////////////////////////////////////////////////////////////
- void thread_destroy( thread_t * thread )
- {
-     reg_t        save_sr;
-
-     process_t  * process = thread->process;
-     core_t     * core    = thread->core;
+ ////////////////////////////////////////////
+ uint32_t thread_destroy( thread_t * thread )
+ {
+     reg_t           save_sr;
+     uint32_t        count;
+
+     thread_type_t   type    = thread->type;
+     process_t     * process = thread->process;
+     core_t        * core    = thread->core;

  #if DEBUG_THREAD_DESTROY
@@ -835 +903 @@
  #endif

-     // check busylocks counter
+     // check calling thread busylocks counter
      thread_assert_can_yield( thread , __FUNCTION__ );

-     // update intrumentation values
+     // update target process instrumentation counter
      process->vmm.pgfault_nr += thread->info.pgfault_nr;

-     // release memory allocated for CPU context and FPU context
+     // remove thread from process th_tbl[]
+     count = process_remove_thread( thread );
+
+     // release memory allocated for CPU context and FPU context if required
      hal_cpu_context_destroy( thread );
-     if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
+     hal_fpu_context_destroy( thread );

+     // release user stack vseg (for an user thread only)
+     if( type == THREAD_USER ) vmm_remove_vseg( process , thread->user_stack_vseg );
+
      // release FPU ownership if required
      hal_disable_irq( &save_sr );
@@ -857 +931 @@
      thread->signature = 0;

-     // release memory for thread descriptor
-     thread_release( thread );
+     // release memory for thread descriptor (including kernel stack)
+     kmem_req_t   req;
+     xptr_t       base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
+
+     req.type = KMEM_PAGE;
+     req.ptr  = GET_PTR( base_xp );
+     kmem_free( &req );

  #if DEBUG_THREAD_DESTROY
@@ -866 +945 @@
  __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
  #endif
+
+     return count;

  }   // end thread_destroy()
@@ -993 +1074 @@
      cxy_t           target_cxy;          // target thread cluster
      thread_t      * target_ptr;          // pointer on target thread
+     process_t     * target_process;      // pointer on target process
+     pid_t           target_pid;          // target process identifier
      xptr_t          target_flags_xp;     // extended pointer on target thread <flags>
      xptr_t          target_join_lock_xp; // extended pointer on target thread <join_lock>
@@ -1006 +1089 @@
      target_ptr = GET_PTR( target_xp );

-     // get target thread identifiers, and attached flag
+     // get target thread identifier, attached flag, and process PID
      target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
      target_ltid     = LTID_FROM_TRDID( target_trdid );
      target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
      target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
+     target_process  = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) );
+     target_pid      = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) );
+
+     // check target PID
+     assert( (pid == target_pid),
+     "unconsistent pid and target_xp arguments" );

      // get killer thread pointers
@@ -1027 +1116 @@
      // must be deleted by the parent process sys_wait() function
      assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
-     "target thread cannot be the main thread\n" );
+     "target thread cannot be the main thread" );

      // check killer thread can yield
@@ -1151 +1240 @@
  void thread_idle_func( void )
  {
+
+ #if DEBUG_THREAD_IDLE
+     uint32_t cycle;
+ #endif
+
      while( 1 )
      {
@@ -1161 +1255 @@

  #if DEBUG_THREAD_IDLE
- {
- uint32_t cycle = (uint32_t)hal_get_cycles();
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_THREAD_IDLE < cycle )
  printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
  __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
- }
  #endif
@@ -1172 +1264 @@

  #if DEBUG_THREAD_IDLE
- {
- uint32_t cycle = (uint32_t)hal_get_cycles();
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_THREAD_IDLE < cycle )
  printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
  __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
- }
  #endif
@@ -1183 +1273 @@

  #if DEBUG_THREAD_IDLE
- {
- uint32_t cycle = (uint32_t)hal_get_cycles();
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_THREAD_IDLE < cycle )
  sched_display( CURRENT_THREAD->core->lid );
- }
  #endif
      // search a runable thread
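
Taken together, the thread_user_create(), thread_user_fork(), thread_kernel_create() and thread_idle_init() hunks move TRDID allocation out of thread_init(): the caller now allocates the descriptor, sets its type, registers it in the owner process to obtain the trdid, creates the user stack vseg (for user threads), and only then calls thread_init(), undoing each step on failure. The fragment below is a condensed sketch of that ordering, reconstructed from the thread_user_create() hunks above; the surrounding names (pid, process, start_func, start_arg, core_lid) come from the function shown in the diff, and error messages and DEBUG blocks are omitted, so this is an illustration rather than a verbatim copy of the committed code.

    /* Sketch of the r625 creation / rollback order in thread_user_create()
     * (condensed from the diff above; printk messages and DEBUG code omitted). */
    thread_t * thread = thread_alloc();                     // 1. descriptor + embedded kernel stack
    if( thread == NULL ) return -1;

    thread->type = THREAD_USER;                             // 2. type must be set before registration

    trdid_t trdid;
    if( process_register_thread( process, thread, &trdid ) )    // 3. get a TRDID from the process
    {
        thread_destroy( thread );                           // rollback: release the descriptor
        return -1;
    }
    thread->trdid = trdid;

    vseg_t * us_vseg = vmm_create_vseg( process,            // 4. user stack vseg, slot derived
                                        VSEG_TYPE_STACK,    //    from the local thread index
                                        LTID_FROM_TRDID( trdid ),
                                        0, 0, 0,            // size / file_offset / file_size unused
                                        XPTR_NULL,          // mapper_xp unused
                                        local_cxy );
    if( us_vseg == NULL )
    {
        process_remove_thread( thread );                    // rollback: unregister, then release
        thread_destroy( thread );
        return -1;
    }

    if( thread_init( thread, process, THREAD_USER, trdid,   // 5. initialize the descriptor
                     start_func, start_arg, core_lid, us_vseg ) )
    {
        vmm_remove_vseg( process , us_vseg );               // rollback in reverse creation order
        process_remove_thread( thread );
        thread_destroy( thread );
        return -1;
    }

The new asserts at the top of thread_init() ("bad type argument" / "bad trdid argument") enforce this contract: both fields must already hold the values passed as arguments when the initializer runs.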
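In thread_user_fork(), the manually built STACK vseg of r624 is replaced by a per-page copy of the parent's stack mappings into the child GPT in copy-on-write mode, followed by write-protecting the parent's own entries. The loop below restates that logic with error handling and debug code stripped; the page lookup and the forks increment sit in collapsed regions of the diff, so the helpers used for them here (ppm_ppn2page(), remote_busylock_acquire(), hal_remote_atomic_add()) are assumptions inferred from the visible context, not quoted from the changeset.

    /* Copy the parent user-stack PTEs into the child GPT with COW, then
     * write-protect the parent side (condensed from the fork hunks above). */
    vpn_t  parent_vpn_base = hal_remote_l32( XPTR( parent_cxy , &parent_us_vseg->vpn_base ) );
    vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy , &parent_us_vseg->vpn_size ) );
    vpn_t  child_vpn       = child_us_vseg->vpn_base;
    vpn_t  parent_vpn;
    bool_t mapped;
    ppn_t  ppn;

    for( parent_vpn = parent_vpn_base ;
         parent_vpn < (parent_vpn_base + parent_vpn_size) ;
         parent_vpn++ , child_vpn++ )
    {
        // copy one PTE, marking the child entry copy-on-write
        hal_gpt_pte_copy( &child_process->vmm.gpt , child_vpn,
                          parent_gpt_xp           , parent_vpn,
                          true,                    // set COW
                          &ppn , &mapped );

        // a mapped page is now shared by parent and child: count the pending fork
        if( mapped )
        {
            xptr_t   page_xp  = ppm_ppn2page( ppn );   // assumed helper (collapsed in the diff)
            cxy_t    page_cxy = GET_CXY( page_xp );
            page_t * page_ptr = GET_PTR( page_xp );
            xptr_t   forks_xp = XPTR( page_cxy , &page_ptr->forks );
            xptr_t   lock_xp  = XPTR( page_cxy , &page_ptr->lock );

            remote_busylock_acquire( lock_xp );        // assumed counterpart of the visible release
            hal_remote_atomic_add( forks_xp , 1 );     // assumed increment of the forks counter
            remote_busylock_release( lock_xp );
        }
    }

    // finally make the parent's own stack mappings copy-on-write as well
    hal_gpt_set_cow( parent_gpt_xp , parent_vpn_base , parent_vpn_size );

Presumably the forks counter lets the COW fault handler know how many private copies of a shared stack page are still pending on first write.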
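Finally, thread_destroy() absorbs the deleted thread_release() helper and now returns a uint32_t. The condensed view below, with the debug blocks, IRQ masking and FPU-ownership release elided, is a reading aid for the scattered hunks above; the meaning of the returned value (the count coming back from process_remove_thread(), presumably the number of threads still attached to the process) is inferred from the code, not stated in the changeset.

    /* Condensed view of the r625 thread_destroy(); see the hunks above for the
     * full body (debug code, IRQ save/restore and FPU release omitted here). */
    uint32_t thread_destroy( thread_t * thread )
    {
        thread_type_t   type    = thread->type;
        process_t     * process = thread->process;

        // unregister the thread from the owner process th_tbl[]
        uint32_t count = process_remove_thread( thread );

        // release CPU and FPU context memory
        hal_cpu_context_destroy( thread );
        hal_fpu_context_destroy( thread );

        // an user thread also gives back its user stack vseg
        if( type == THREAD_USER ) vmm_remove_vseg( process , thread->user_stack_vseg );

        // the descriptor and its embedded kernel stack occupy one physical page
        // block, released directly (this replaces the deleted thread_release())
        kmem_req_t req;
        xptr_t     base_xp = ppm_base2page( XPTR( local_cxy , thread ) );
        req.type = KMEM_PAGE;
        req.ptr  = GET_PTR( base_xp );
        kmem_free( &req );

        return count;
    }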