Changeset 347 for soft/giet_vm
Timestamp: Jun 29, 2014, 12:27:21 PM
File: 1 edited
Legend: lines prefixed with "-" were removed in r347, lines prefixed with "+" were added in r347, unprefixed lines are unchanged context, and "…" marks unchanged code elided by the changeset viewer.
soft/giet_vm/giet_boot/boot.c
r345 vs r347

  // and where there is one physical memory bank per cluster.
  //
- // This code is executed in the boot phase by proc[0] and performs the following tasks:
- // - load into memory the giet_vm binary files, contained in a FAT32 file system,
+ // This code, executed in the boot phase by proc[0,0,0], performs the following tasks:
+ // - load into memory the binary files, from a FAT32 file system,
  // - build the various page tables (one page table per vspace)
  // - initialize the schedulers (one scheduler per processor)
…
  #include <tty_driver.h>
  #include <xcu_driver.h>
- #include <ioc_driver.h>
+ #include <bdv_driver.h>
  #include <dma_driver.h>
  #include <cma_driver.h>
…
  extern fat32_fs_t fat;

- // Page table addresses arrays
+ // Page tables base addresses, sizes, and PT2 allocators:
+ // for each vspace there can exist one page table per cluster,
+ // but only one virtual base address per vspace.

  __attribute__((section (".bootdata")))
- volatile paddr_t _ptabs_paddr[GIET_NB_VSPACE_MAX];
+ unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];

  __attribute__((section (".bootdata")))
- volatile unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];
-
- // Next free PT2 index array
+ paddr_t _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
+
  __attribute__((section (".bootdata")))
- volatile unsigned int _next_free_pt2[GIET_NB_VSPACE_MAX] =
- { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 };
-
- // Max PT2 index
+ unsigned int _ptabs_max_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
+
  __attribute__((section (".bootdata")))
- volatile unsigned int _max_pt2[GIET_NB_VSPACE_MAX] =
- { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 };
+ unsigned int _ptabs_next_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];

  // Scheduler pointers array (virtual addresses)
- // indexed by (x,y,lpid) : ((x << Y_WIDTH) + y) * NB_PROCS_MAX + lpid
+ // indexed by (x,y,lpid) : (((x << Y_WIDTH) + y) * NB_PROCS_MAX) + lpid
  __attribute__((section (".bootdata")))
- static_scheduler_t* _schedulers[ NB_PROCS_MAX << (X_WIDTH + Y_WIDTH) ];
+ static_scheduler_t* _schedulers[1<<X_WIDTH][1<<Y_WIDTH][NB_PROCS_MAX];
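The scheduler array change is notational rather than structural: C's row-major layout makes the new three-dimensional index land on exactly the slot the old flat expression computed. A minimal sketch (not part of the changeset; the constant values are placeholders for the platform parameters):

    #define X_WIDTH      4   /* assumed value, set by the hardware description */
    #define Y_WIDTH      4   /* assumed value */
    #define NB_PROCS_MAX 4   /* assumed value */

    /* r345: flat array of NB_PROCS_MAX << (X_WIDTH + Y_WIDTH) entries */
    unsigned int flat_index( unsigned int x, unsigned int y, unsigned int lpid )
    {
        return (((x << Y_WIDTH) + y) * NB_PROCS_MAX) + lpid;
    }

    /* r347: static_scheduler_t* _schedulers[1<<X_WIDTH][1<<Y_WIDTH][NB_PROCS_MAX];
     * C row-major order linearizes [x][y][lpid] to
     * ((x * (1 << Y_WIDTH)) + y) * NB_PROCS_MAX + lpid,
     * i.e. exactly flat_index(x, y, lpid): same slot, clearer notation. */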
…
  //////////////////////////////////////////////////////////////////////////////
  // boot_add_pte()
  // This function registers a new PTE in the page table defined
- // by the vspace_id argument, and updates both PT1 and PT2.
- // A new PT2 is used when required.
+ // by the vspace_id argument and the (x,y) coordinates.
+ // It updates both the PT1 and PT2, and a new PT2 is used if required.
  // As the set of PT2s is implemented as a fixed size array (no dynamic
  // allocation), this function checks a possible overflow of the PT2 array.
  //////////////////////////////////////////////////////////////////////////////
  void boot_add_pte(unsigned int vspace_id,
+                   unsigned int x,
+                   unsigned int y,
                    unsigned int vpn,
                    unsigned int flags,
…
      unsigned int ix1;
      unsigned int ix2;
-     paddr_t      pt1_pbase;     // PT1 physical base address
-     paddr_t      pt2_pbase = 0; // PT2 physical base address
-     paddr_t      pte_paddr;     // PTE physucal address
+     paddr_t      pt2_pbase;     // PT2 physical base address
+     paddr_t      pte_paddr;     // PTE physical address
      unsigned int pt2_id;        // PT2 index
      unsigned int ptd;           // PTD : entry in PT1
-     unsigned int max_pt2;       // max number of PT2s for a given vspace

      ix1 = vpn >> 9;             // 11 bits
      ix2 = vpn & 0x1FF;          // 9 bits

-     // check that the _max_pt2[vspace_id] has been set
-     max_pt2 = _max_pt2[vspace_id];
+     // get page table physical base address and size
+     paddr_t      pt1_pbase = _ptabs_paddr[vspace_id][x][y];
+     unsigned int max_pt2   = _ptabs_max_pt2[vspace_id][x][y];

      if (max_pt2 == 0)
…
      }

-     // get page table physical base address
-     pt1_pbase = _ptabs_paddr[vspace_id];
-
      // get ptd in PT1
      ptd = _physical_read(pt1_pbase + 4 * ix1);

-     if ((ptd & PTE_V) == 0)    // invalid PTD: compute PT2 base address,
+     if ((ptd & PTE_V) == 0)    // undefined PTD: compute PT2 base address,
                                 // and set a new PTD in PT1
      {
-         pt2_id = _next_free_pt2[vspace_id];
+         pt2_id = _ptabs_next_pt2[vspace_id][x][y];
          if (pt2_id == max_pt2)
          {
              _puts("\n[BOOT ERROR] in boot_add_pte() function\n");
-             _puts("the length of the ptab vobj is too small\n");
+             _puts("the length of the PTAB vobj is too small\n");
              _puts(" max_pt2 = ");
              _putd( max_pt2 );
…
              _putd( pt2_id );
              _puts("\n");
              _exit();
          }
-         else
-         {
-             pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
-             ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
-             _physical_write( pt1_pbase + 4 * ix1, ptd);
-             _next_free_pt2[vspace_id] = pt2_id + 1;
-         }
+
+         pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
+         ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
+         _physical_write( pt1_pbase + 4 * ix1, ptd);
+         _ptabs_next_pt2[vspace_id][x][y] = pt2_id + 1;
      }
      else                       // valid PTD: compute PT2 base address
…
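A worked example (illustration only, not part of the changeset) of the VPN split performed by boot_add_pte():

    /* For virtual address 0x00412000 the VPN is vaddr >> 12 = 0x00412,
     * which boot_add_pte() splits as: */
    unsigned int vpn = 0x00412000 >> 12;   /* vpn = 0x412                  */
    unsigned int ix1 = vpn >> 9;           /* ix1 = 2    : index in PT1    */
    unsigned int ix2 = vpn & 0x1FF;        /* ix2 = 0x12 : index in PT2    */
    /* The PTD for this page lives at _ptabs_paddr[vspace_id][x][y] + 4*ix1;
     * when no PT2 exists yet, one is carved out of the same PTAB vobj at
     * pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id, and the PTD stores that
     * base address right-shifted by 12, tagged PTE_V | PTE_T. */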
…
- /////////////////////////////////////////////////////////////////////
- // This function build the page table for a given vspace.
+ ////////////////////////////////////////////////////////////////////////
+ // This function builds the page table(s) for a given vspace.
+ // It builds as many page tables as there are vobjs of type PTAB
+ // in the vspace, because page tables can be replicated.
  // The physical base addresses for all vsegs (global and private)
  // must have been previously computed and stored in the mapping.
- // It initializes the MWMR channels.
- /////////////////////////////////////////////////////////////////////
+ //
+ // General rule regarding local / shared vsegs:
+ // - shared vsegs are mapped in all page tables
+ // - local vsegs are mapped only in the "local" page table
+ ////////////////////////////////////////////////////////////////////////
  void boot_vspace_pt_build(unsigned int vspace_id)
  {
-     unsigned int vseg_id;
+     unsigned int ptab_id;    // global index for a vseg containing a PTAB
+     unsigned int priv_id;    // global index for a private vseg in a vspace
+     unsigned int glob_id;    // global index for a global vseg
      unsigned int npages;
      unsigned int ppn;
…
      unsigned int verbose = 0;    // can be used to activate trace in add_pte()

-     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
-     mapping_vspace_t * vspace = _get_vspace_base(header);
-     mapping_vseg_t   * vseg   = _get_vseg_base(header);
-
-     // private segments
-     for (vseg_id = vspace[vspace_id].vseg_offset;
-          vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
-          vseg_id++)
-     {
-         vpn = vseg[vseg_id].vbase >> 12;
-         ppn = (unsigned int) (vseg[vseg_id].pbase >> 12);
-
-         npages = vseg[vseg_id].length >> 12;
-         if ((vseg[vseg_id].length & 0xFFF) != 0) npages++;
-
-         flags = PTE_V;
-         if (vseg[vseg_id].mode & C_MODE_MASK) flags |= PTE_C;
-         if (vseg[vseg_id].mode & X_MODE_MASK) flags |= PTE_X;
-         if (vseg[vseg_id].mode & W_MODE_MASK) flags |= PTE_W;
-         if (vseg[vseg_id].mode & U_MODE_MASK) flags |= PTE_U;
+     mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
+     mapping_vspace_t  * vspace  = _get_vspace_base(header);
+     mapping_vseg_t    * vseg    = _get_vseg_base(header);
+     mapping_vobj_t    * vobj    = _get_vobj_base(header);
+     mapping_pseg_t    * pseg    = _get_pseg_base(header);
+     mapping_cluster_t * cluster = _get_cluster_base(header);
+
+     // external loop on private vsegs to find all PTAB vobjs in vspace
+     for (ptab_id = vspace[vspace_id].vseg_offset;
+          ptab_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
+          ptab_id++)
+     {
+         // get global index of first vobj in vseg
+         unsigned int vobj_id = vseg[ptab_id].vobj_offset;
+
+         if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
+         {
+             // get cluster coordinates for the PTAB
+             unsigned int ptab_pseg_id    = vseg[ptab_id].psegid;
+             unsigned int ptab_cluster_id = pseg[ptab_pseg_id].clusterid;
+             unsigned int x_ptab          = cluster[ptab_cluster_id].x;
+             unsigned int y_ptab          = cluster[ptab_cluster_id].y;
+
+             // internal loop on private vsegs to build
+             // the (vspace_id, x_ptab, y_ptab) page table
+             for (priv_id = vspace[vspace_id].vseg_offset;
+                  priv_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
+                  priv_id++)
+             {
+                 // get cluster coordinates for private vseg
+                 unsigned int priv_pseg_id    = vseg[priv_id].psegid;
+                 unsigned int priv_cluster_id = pseg[priv_pseg_id].clusterid;
+                 unsigned int x_priv          = cluster[priv_cluster_id].x;
+                 unsigned int y_priv          = cluster[priv_cluster_id].y;
+
+                 // only non local or matching private vsegs must be mapped
+                 if ( (vseg[priv_id].local == 0) ||
+                      ((x_ptab == x_priv) && (y_ptab == y_priv)) )
+                 {
+                     vpn = vseg[priv_id].vbase >> 12;
+                     ppn = (unsigned int) (vseg[priv_id].pbase >> 12);
+                     npages = vseg[priv_id].length >> 12;
+                     if ((vseg[priv_id].length & 0xFFF) != 0) npages++;
+
+                     flags = PTE_V;
+                     if (vseg[priv_id].mode & C_MODE_MASK) flags |= PTE_C;
+                     if (vseg[priv_id].mode & X_MODE_MASK) flags |= PTE_X;
+                     if (vseg[priv_id].mode & W_MODE_MASK) flags |= PTE_W;
+                     if (vseg[priv_id].mode & U_MODE_MASK) flags |= PTE_U;

-     // These three flags (Local, Remote and Dirty) are set to 1 to reduce
-     // latency of TLB miss (L/R) and write (D): Avoid hardware update
-     // mechanism for these flags. This optimization can be performed
-     // because GIET_VM does nothing with these flags.
-
-     flags |= PTE_L;
-     flags |= PTE_R;
-     flags |= PTE_D;
+                     // The three flags (Local, Remote and Dirty) are set to 1 to reduce
+                     // latency of TLB miss (L/R) and write (D): Avoid hardware update
+                     // mechanism for these flags because GIET_VM does not use these flags.
+
+                     flags |= PTE_L;
+                     flags |= PTE_R;
+                     flags |= PTE_D;

  #if BOOT_DEBUG_PT
-     _puts(vseg[vseg_id].name);
-     _puts(" : flags = ");
-     _putx(flags);
-     _puts(" / npages = ");
-     _putd(npages);
-     _puts(" / pbase = ");
-     _putl(vseg[vseg_id].pbase);
-     _puts("\n");
- #endif
-     // loop on 4K pages
-     for (page_id = 0; page_id < npages; page_id++)
-     {
-         boot_add_pte(vspace_id, vpn, flags, ppn, verbose);
-         vpn++;
-         ppn++;
-     }
- }
-
- // global segments
- for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
- {
-     vpn = vseg[vseg_id].vbase >> 12;
-     ppn = (unsigned int)(vseg[vseg_id].pbase >> 12);
-     npages = vseg[vseg_id].length >> 12;
-     if ((vseg[vseg_id].length & 0xFFF) != 0) npages++;
-
-     flags = PTE_V;
-     if (vseg[vseg_id].mode & C_MODE_MASK) flags |= PTE_C;
-     if (vseg[vseg_id].mode & X_MODE_MASK) flags |= PTE_X;
-     if (vseg[vseg_id].mode & W_MODE_MASK) flags |= PTE_W;
-     if (vseg[vseg_id].mode & U_MODE_MASK) flags |= PTE_U;
-
-     // Flags set for optimization (as explained above)
-
-     flags |= PTE_L;
-     flags |= PTE_R;
-     flags |= PTE_D;
+                     _puts(vseg[priv_id].name);
+                     _puts(" : flags = ");
+                     _putx(flags);
+                     _puts(" / npages = ");
+                     _putd(npages);
+                     _puts(" / pbase = ");
+                     _putl(vseg[priv_id].pbase);
+                     _puts("\n");
+ #endif
+                     // loop on 4K pages
+                     for (page_id = 0; page_id < npages; page_id++)
+                     {
+                         boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
+                         vpn++;
+                         ppn++;
+                     }
+                 }
+             } // end internal loop on private vsegs
+
+             // internal loop on global vsegs to build the (x_ptab,y_ptab) page table
+             for (glob_id = 0; glob_id < header->globals; glob_id++)
+             {
+                 // get cluster coordinates for global vseg
+                 unsigned int glob_pseg_id    = vseg[glob_id].psegid;
+                 unsigned int glob_cluster_id = pseg[glob_pseg_id].clusterid;
+                 unsigned int x_glob          = cluster[glob_cluster_id].x;
+                 unsigned int y_glob          = cluster[glob_cluster_id].y;
+
+                 // only non local or matching global vsegs must be mapped
+                 if ( (vseg[glob_id].local == 0) ||
+                      ((x_ptab == x_glob) && (y_ptab == y_glob)) )
+                 {
+                     vpn = vseg[glob_id].vbase >> 12;
+                     ppn = (unsigned int)(vseg[glob_id].pbase >> 12);
+                     npages = vseg[glob_id].length >> 12;
+                     if ((vseg[glob_id].length & 0xFFF) != 0) npages++;
+
+                     flags = PTE_V;
+                     if (vseg[glob_id].mode & C_MODE_MASK) flags |= PTE_C;
+                     if (vseg[glob_id].mode & X_MODE_MASK) flags |= PTE_X;
+                     if (vseg[glob_id].mode & W_MODE_MASK) flags |= PTE_W;
+                     if (vseg[glob_id].mode & U_MODE_MASK) flags |= PTE_U;
+
+                     // Flags set for optimization (as explained above)
+
+                     flags |= PTE_L;
+                     flags |= PTE_R;
+                     flags |= PTE_D;

  #if BOOT_DEBUG_PT
-     _puts(vseg[vseg_id].name);
-     _puts(" : flags = ");
-     _putx(flags);
-     _puts(" / npages = ");
-     _putd(npages);
-     _puts(" / pbase = ");
-     _putl(vseg[vseg_id].pbase);
-     _puts("\n");
- #endif
-     // loop on 4K pages
-     for (page_id = 0; page_id < npages; page_id++)
-     {
-         boot_add_pte(vspace_id, vpn, flags, ppn, verbose);
-         vpn++;
-         ppn++;
-     }
- }
+                     _puts(vseg[glob_id].name);
+                     _puts(" : flags = ");
+                     _putx(flags);
+                     _puts(" / npages = ");
+                     _putd(npages);
+                     _puts(" / pbase = ");
+                     _putl(vseg[glob_id].pbase);
+                     _puts("\n");
+ #endif
+                     // loop on 4K pages
+                     for (page_id = 0; page_id < npages; page_id++)
+                     {
+                         boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
+                         vpn++;
+                         ppn++;
+                     }
+                 }
+             } // end internal loop on global vsegs
+
+             _puts("\n[BOOT] Page Table for vspace ");
+             _puts( vspace[vspace_id].name );
+             _puts(" in cluster[");
+             _putd( x_ptab );
+             _puts(",");
+             _putd( y_ptab );
+             _puts("] completed at cycle ");
+             _putd( _get_proctime() );
+             _puts("\n");
+
+ #if BOOT_DEBUG_PT
+             _puts("vaddr = ");
+             _putx( _ptabs_vaddr[vspace_id] );
+             _puts(" / paddr = ");
+             _putl( _ptabs_paddr[vspace_id][x_ptab][y_ptab] );
+             _puts(" / PT2 number = ");
+             _putd( _ptabs_next_pt2[vspace_id][x_ptab][y_ptab] );
+             _puts("\n");
+ #endif
+
+         } // end if PTAB
+     } // end first loop on private vsegs
  } // end boot_vspace_pt_build()
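The same local/shared rule appears in both internal loops above; condensed as a sketch (not part of the changeset; must_map() is a hypothetical helper):

    /* Decides whether a vseg must be entered in the page table replicated
     * in cluster (x_ptab, y_ptab). */
    unsigned int must_map( unsigned int local,    /* vseg "local" attribute  */
                           unsigned int x_vseg,   /* cluster owning the vseg */
                           unsigned int y_vseg,
                           unsigned int x_ptab,   /* cluster owning the PTAB */
                           unsigned int y_ptab )
    {
        /* shared vsegs (local == 0) go in every replicated page table;
         * local vsegs only in the page table of their own cluster */
        return (local == 0) || ((x_vseg == x_ptab) && (y_vseg == y_ptab));
    }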
…
  // Set pbase for a vseg when identity mapping is required.
  // The length of the vseg must be known.
- // The ordered linked list of vsegs mapped on pseg must be updated,
- // and overlap with previously mapped vsegs must be checked.
+ // The ordered linked list of vsegs mapped on pseg is updated,
+ // and overlap with previously mapped vsegs is checked.
  ///////////////////////////////////////////////////////////////////////////
  void boot_vseg_set_paddr_ident(mapping_vseg_t * vseg)
…
  // It updates the pbase and the length fields of the vseg.
  // It updates the pbase and vbase fields of all vobjs in the vseg.
- // It updates the _ptabs_paddr[] and _ptabs_vaddr[] arrays.
+ // It updates the _ptabs_paddr[], _ptabs_vaddr[], _ptabs_max_pt2[],
+ // and _ptabs_next_pt2[] arrays.
  // It is a global vseg if vspace_id = (-1).
  ///////////////////////////////////////////////////////////////////////////
…
      mapping_vobj_t * vobj = _get_vobj_base(header);

-     // loop on the vobjs contained in vseg to compute
+     // first loop on the vobjs contained in vseg to compute
      // the vseg length, required for mapping.
      cur_length = 0;
…
      vobj[vobj_id].paddr = cur_paddr;

-     // initialize _ptabs_vaddr[] & _ptabs_paddr[] if PTAB
+     // initialize _ptabs_vaddr[], _ptabs_paddr[], _ptabs_max_pt2[] if PTAB
      if (vobj[vobj_id].type == VOBJ_TYPE_PTAB)
      {
…
          _exit();
      }
-     // register both physical and virtual page table address
-     _ptabs_vaddr[vspace_id] = vobj[vobj_id].vaddr;
-     _ptabs_paddr[vspace_id] = vobj[vobj_id].paddr;
+     // get cluster coordinates for PTAB
+     unsigned int cluster_xy = (unsigned int)(cur_paddr>>32);
+     unsigned int x          = cluster_xy >> Y_WIDTH;
+     unsigned int y          = cluster_xy & ((1<<Y_WIDTH)-1);
+
+     // register physical and virtual page table addresses, size, and next PT2
+     _ptabs_vaddr[vspace_id]          = vobj[vobj_id].vaddr;
+     _ptabs_paddr[vspace_id][x][y]    = vobj[vobj_id].paddr;
+     _ptabs_max_pt2[vspace_id][x][y]  = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
+     _ptabs_next_pt2[vspace_id][x][y] = 0;

      // reset all valid bits in PT1
…
          _physical_write(cur_paddr + offset, 0);
      }
-
-     // computing the number of second level pages
-     _max_pt2[vspace_id] = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
  }

…
  } // end boot_vseg_map()
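The coordinate extraction above relies on the cluster index occupying the (X_WIDTH + Y_WIDTH) most significant bits of the 40-bit physical address. A worked example (illustration only; Y_WIDTH = 4 and the address value are assumed):

    paddr_t      paddr      = 0x2300100000ULL;            /* example address */
    unsigned int cluster_xy = (unsigned int)(paddr >> 32);        /* 0x23 */
    unsigned int x          = cluster_xy >> Y_WIDTH;              /* 2    */
    unsigned int y          = cluster_xy & ((1 << Y_WIDTH) - 1);  /* 3    */
    /* so this PTAB would be registered for cluster (x = 2, y = 3) */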
…
  ///////////////////////////////////////////////////////////////////////////
  // This function builds the page tables for all virtual spaces
  // defined in the mapping_info data structure, in three steps:
…
  // - step 2 : It computes the physical base address for all private
  //            vsegs and all vobjs in each virtual space.
- // - step 3 : It actually fill the page table for each vspace.
+ // - step 3 : It actually fills the page table(s) for each vspace.
  //
- // Note: It must exist at least one vspace in the mapping_info...
- /////////////////////////////////////////////////////////////////////
+ // There must exist at least one vspace in the mapping.
+ // For each vspace there can exist one page table per cluster.
+ ///////////////////////////////////////////////////////////////////////////
  void boot_pt_init()
  {
…
  #endif

+     //////////////////////////////////
      // step 1 : loop on global vsegs
…
      }

+     ////////////////////////////////////////////////////////////
      // step 2 : loop on virtual vspaces to map private vsegs
+
      for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
      {
…
  #endif

-         // vsegs with identity mapping constraint first
          for (vseg_id = vspace[vspace_id].vseg_offset;
               vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
               vseg_id++)
          {
-             if (vseg[vseg_id].ident == 1)
-                 boot_vseg_map(&vseg[vseg_id], vspace_id);
-         }
-         // unconstrained vsegs second
-         for (vseg_id = vspace[vspace_id].vseg_offset;
-              vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
-              vseg_id++)
-         {
-             if (vseg[vseg_id].ident == 0)
+             // private vsegs cannot be identity mapping
+             if (vseg[vseg_id].ident != 0)
+             {
+                 _puts("\n[BOOT ERROR] in boot_pt_init() : vspace ");
+                 _puts( vspace[vspace_id].name );
+                 _puts(" contains vseg with identity mapping\n");
+                 _exit();
+             }
+
              boot_vseg_map(&vseg[vseg_id], vspace_id);
          }
      }
…
  #endif

+     /////////////////////////////////////////////////////////////
      // step 3 : loop on the vspaces to build the page tables
      for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
…
          boot_vspace_pt_build(vspace_id);

-         _puts("\n[BOOT] Page Table for vspace \"");
-         _puts( vspace[vspace_id].name );
-         _puts("\" completed at cycle ");
-         _putd( _get_proctime() );
-         _puts("\n");
-
- #if BOOT_DEBUG_PT
-         _puts("  vaddr = ");
-         _putx( _ptabs_vaddr[vspace_id] );
-         _puts(" / paddr = ");
-         _putl( _ptabs_paddr[vspace_id] );
-         _puts(" / PT2 number = ");
-         _putd( _max_pt2[vspace_id] );
-         _puts("\n");
- #endif
      }
  } // end boot_pt_init()
…
  #endif

-     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
+     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );

      unsigned int ptab_found = 0;
…
  // This function initialises all processors schedulers.
  // This is done by processor 0, and the MMU must be activated.
- // - In Step 1, it initialises the _schedulers[gpid] pointers array, and scan
+ // - In Step 1, it initialises the _schedulers[] pointers array, and scan
  //   the processors to initialise the schedulers, including the
  //   idle_task context (ltid == 14) and HWI / SWI / PTI vectors.
…
  //   and the interrupt vectors.
  // Implementation note:
- // We need to use both proc_id to scan the mapping info structure,
- // and lpid to access the schedulers array.
- // - the _schedulers[] array of pointers can contain "holes", because
- //   it is indexed by the global pid = cluster_xy*NB_PROCS_MAX + lpid
- // - the mapping info array of processors is contiguous, it is indexed
- //   by proc_id, and use an offset specific in each cluster.
+ // We need to use both (proc_id) to scan the mapping info structure,
+ // and (x,y,lpid) to access the schedulers array.
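The note above summarizes the indexing change that runs through the whole changeset: the old code addressed schedulers by a flat gpid, the new code by (x,y,lpid). The two are related by gpid = cluster_xy * NB_PROCS_MAX + lpid, with cluster_xy = (x << Y_WIDTH) + y; a sketch of the inverse decomposition, the same one the secondary processors use at the end of this changeset:

    /* recover (x, y, lpid) from a flat global processor index gpid */
    unsigned int cluster_xy = gpid / NB_PROCS_MAX;
    unsigned int lpid       = gpid % NB_PROCS_MAX;
    unsigned int x          = cluster_xy >> Y_WIDTH;
    unsigned int y          = cluster_xy & ((1 << Y_WIDTH) - 1);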

      for (cluster_id = 0 ; cluster_id < X_SIZE*Y_SIZE ; cluster_id++)
…
          unsigned int x = cluster[cluster_id].x;
          unsigned int y = cluster[cluster_id].y;
-         unsigned int cluster_xy = (x<<Y_WIDTH) + y;

  #if BOOT_DEBUG_SCHED
…
          {
              // set the schedulers pointers array
-             _schedulers[cluster_xy * NB_PROCS_MAX + lpid] =
-                 (static_scheduler_t*)&psched[lpid];
+             _schedulers[x][y][lpid] = (static_scheduler_t*)&psched[lpid];

  #if BOOT_DEBUG_SCHED
…
              psched[lpid].context[IDLE_TASK_INDEX][CTX_CR_ID]   = 0;
              psched[lpid].context[IDLE_TASK_INDEX][CTX_SR_ID]   = 0xFF03;
-             psched[lpid].context[IDLE_TASK_INDEX][CTX_PTPR_ID] = _ptabs_paddr[0] >> 13;
+             psched[lpid].context[IDLE_TASK_INDEX][CTX_PTPR_ID] = _ptabs_paddr[0][x][y] >> 13;
              psched[lpid].context[IDLE_TASK_INDEX][CTX_PTAB_ID] = _ptabs_vaddr[0];
              psched[lpid].context[IDLE_TASK_INDEX][CTX_TTY_ID]  = 0;
…
          unsigned int x = cluster[cluster_id].x;
          unsigned int y = cluster[cluster_id].y;
          unsigned int cluster_xy = (x<<Y_WIDTH) + y;
-         psched = _schedulers[ cluster_xy * NB_PROCS_MAX];
+         psched = _schedulers[x][y][0];

          // update WTI vector for scheduler[cluster_id][lpid]
…
          unsigned int x = cluster[cluster_id].x;
          unsigned int y = cluster[cluster_id].y;
-         unsigned int cluster_xy = (x<<Y_WIDTH) + y;
-         psched = _schedulers[cluster_xy * NB_PROCS_MAX];
+         psched = _schedulers[x][y][0];
          unsigned int slot;
          unsigned int entry;
…
      // We must set the PTPR depending on the vspace, because the start_vector
      // and the stack address are defined in virtual space.
-     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
+     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );

      // loop on the tasks in vspace (task_id is the global index)
…
          // compute gpid (global processor index) and scheduler base address
          unsigned int gpid = cluster_xy * NB_PROCS_MAX + lpid;
-         psched = _schedulers[gpid];
+         psched = _schedulers[x][y][lpid];

          // ctx_sr : value required before an eret instruction
…
          // ctx_ptpr : page table physical base address (shifted by 13 bit)
-         unsigned int ctx_ptpr = (unsigned int)(_ptabs_paddr[vspace_id] >> 13);
+         unsigned int ctx_ptpr = (unsigned int)(_ptabs_paddr[vspace_id][x][y] >> 13);

          // ctx_ptab : page_table virtual base address
…
  void boot_mapping_init()
  {
+     // deactivates IOC interrupt
      _ioc_init( 0 );

+     // open file "map.bin"
      int fd_id = _fat_open( IOC_BOOT_MODE,
                             "map.bin",
                             0 );         // no creation
      if ( fd_id == -1 )
      {
…
  #endif

+     // get "map.bin" file size (from fat32) and check it
      unsigned int size = fat.fd[fd_id].file_size;
+
+     if ( size > SEG_BOOT_MAPPING_SIZE )
+     {
+         _puts("\n[BOOT ERROR] : allocated segment too small for map.bin file\n");
+         _exit();
+     }
+
+     // load "map.bin" file into buffer
      unsigned int nblocks = size >> 9;
      unsigned int offset  = size & 0x1FF;
…
      _fat_close( fd_id );

+     // check the mapping
      boot_mapping_check();
+
  } // end boot_mapping_init()
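The nblocks/offset pair above splits the file size into full 512-byte FAT32 blocks plus a remainder. A worked example (illustration only; the file size is assumed):

    unsigned int size    = 5000;          /* assumed map.bin size        */
    unsigned int nblocks = size >> 9;     /* 5000 / 512 = 9 full blocks  */
    unsigned int offset  = size & 0x1FF;  /* 5000 % 512 = 392 last bytes */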
…

- //////////////////////////////////////////////////////////////////////////////////
- // This function opens the .elf file identified by the "pathname" argument.
- // It loads the complete file in a dedicated buffer, it copies all loadable
- // segments at the memory virtual address defined in the .elf file,
- // and closes the file.
- // Notes:
- // - The processor PTPR should contain the value corresponding to the
- //   vspace containing the .elf file.
- // - As this function requires a temporary memory buffer
- //   to load the complete .elf file before copying the various segments
- //   to the proper location, it uses the seg_boot_buffer defined in map.xml.
- //////////////////////////////////////////////////////////////////////////////////
- void load_one_elf_file( unsigned int mode,
+ /////////////////////////////////////////////////////////////////////////////////////
+ // This function loads all loadable segments for one .elf file, identified
+ // by the "pathname" argument. Some loadable segments can be copied in several
+ // clusters: same virtual address but different physical addresses.
+ // - It opens the file.
+ // - It loads the complete file in a dedicated buffer (seg_boot_buffer).
+ // - It copies each loadable segment at the virtual address defined in the .elf
+ //   file, making several copies if the target vseg is not local.
+ // - It closes the file.
+ // Note:
+ // - This function is supposed to be executed by processor[0,0,0].
+ //   We must use physical addresses to reach the destination buffers that
+ //   can be located in remote clusters. We use either a _physical_memcpy(),
+ //   or a _dma_physical_copy() if DMA is available.
+ //   The source seg_boot_buffer must be identity mapping.
+ //////////////////////////////////////////////////////////////////////////////////////
+ void load_one_elf_file( unsigned int is_kernel,     // kernel file if non zero
                          char*        pathname,
-                         unsigned int vspace_id )    // to use the proper page_table
+                         unsigned int vspace_id )    // to scan the proper vspace
  {
+     mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
+     mapping_vspace_t  * vspace  = _get_vspace_base(header);
+     mapping_vseg_t    * vseg    = _get_vseg_base(header);
+     mapping_vobj_t    * vobj    = _get_vobj_base(header);
+
      unsigned int seg_id;
…
      // open .elf file
-     int fd_id = _fat_open( mode,
+     int fd_id = _fat_open( IOC_BOOT_MODE,
                             pathname,
                             0 );      // no creation
…
      // load file in boot_buffer
-     if( _fat_read( mode,
+     if( _fat_read( IOC_BOOT_MODE,
                     fd_id,
                     boot_buffer,
…
      unsigned int nsegments = elf_header_ptr->e_phnum;

- #if BOOT_DEBUG_ELF
-     _puts("\n[BOOT DEBUG] File ");
-     _puts( pathname );
-     _puts(" loaded at cycle ");
-     _putd( _get_proctime() );
-     _puts(" / bytes = ");
-     _putd( nbytes );
-     _puts(" / sectors = ");
-     _putd( nsectors );
-     _puts("\n");
- #endif
-
-     // Loop on loadable segments in the ELF file
+     _puts("\n[BOOT] File ");
+     _puts( pathname );
+     _puts(" loaded at cycle ");
+     _putd( _get_proctime() );
+     _puts("\n");
+
+     // Loop on loadable segments in the .elf file
      for (seg_id = 0 ; seg_id < nsegments ; seg_id++)
      {
…
          unsigned int seg_memsz = elf_pht_ptr[seg_id].p_memsz;

+ #if BOOT_DEBUG_ELF
+         _puts(" - segment ");
+         _putd( seg_id );
+         _puts(" / vaddr = ");
+         _putx( seg_vaddr );
+         _puts(" / file_size = ");
+         _putx( seg_filesz );
+         _puts("\n");
+ #endif
+
          if( seg_memsz < seg_filesz )
          {
…
              _puts(" in file ");
              _puts( pathname );
-             _puts(" has a wrong size\n");
+             _puts(" has a memsz < filesz \n");
              _exit();
          }
…
          unsigned int src_vaddr = (unsigned int)boot_buffer + seg_offset;

- #if BOOT_DEBUG_ELF
-         _puts(" - segment ");
-         _putd( seg_id );
-         _puts(" / dst_vaddr = ");
-         _putx( seg_vaddr );
-         _puts(" / src_vaddr = ");
-         _putx( src_vaddr );
-         _puts(" / size = ");
-         _putx( seg_filesz );
-         _puts("\n");
- #endif
-
-         // copy the segment from boot buffer to destination buffer
-         if( NB_DMA_CHANNELS > 0 )
-         {
-             _dma_copy( 0,        // DMA cluster index
-                        0,        // DMA channel index
-                        vspace_id,
-                        seg_vaddr,
-                        src_vaddr,
-                        seg_filesz );
+         // search all vsegs matching the virtual address
+         unsigned int vseg_first;
+         unsigned int vseg_last;
+         unsigned int vseg_id;
+         unsigned int found = 0;
+         if ( is_kernel )
+         {
+             vseg_first = 0;
+             vseg_last  = header->globals;
          }
          else
          {
-             memcpy( (char*)seg_vaddr,
-                     (char*)src_vaddr,
-                     seg_filesz );
+             vseg_first = vspace[vspace_id].vseg_offset;
+             vseg_last  = vseg_first + vspace[vspace_id].vsegs;
+         }
+
+         for ( vseg_id = vseg_first ; vseg_id < vseg_last ; vseg_id++ )
+         {
+             if ( seg_vaddr == vseg[vseg_id].vbase )  // matching
+             {
+                 found = 1;
+
+                 // get destination buffer physical address and size
+                 paddr_t      seg_paddr = vseg[vseg_id].pbase;
+                 unsigned int vobj_id   = vseg[vseg_id].vobj_offset;
+                 unsigned int seg_size  = vobj[vobj_id].length;
+
+ #if BOOT_DEBUG_ELF
+                 _puts("   loaded into vseg ");
+                 _puts( vseg[vseg_id].name );
+                 _puts(" at paddr = ");
+                 _putl( seg_paddr );
+                 _puts("  (buffer size = ");
+                 _putx( seg_size );
+                 _puts(")\n");
+ #endif
+                 // check vseg size
+                 if ( seg_size < seg_filesz )
+                 {
+                     _puts("\n[BOOT ERROR] in load_one_elf_file()\n");
+                     _puts("vseg ");
+                     _puts( vseg[vseg_id].name );
+                     _puts(" is too small for loadable segment ");
+                     _putx( seg_vaddr );
+                     _puts(" in file ");
+                     _puts( pathname );
+                     _puts(" \n");
+                     _exit();
+                 }
+
+                 // copy the segment from boot buffer to destination buffer
+                 // using DMA channel[0,0,0] if it is available.
+                 if( NB_DMA_CHANNELS > 0 )
+                 {
+                     _dma_physical_copy( 0,                  // DMA in cluster[0,0]
+                                         0,                  // DMA channel 0
+                                         (paddr_t)seg_paddr, // destination paddr
+                                         (paddr_t)src_vaddr, // source paddr
+                                         seg_filesz );       // size
+                 }
+                 else
+                 {
+                     _physical_memcpy( (paddr_t)seg_paddr,   // destination paddr
+                                       (paddr_t)src_vaddr,   // source paddr
+                                       seg_filesz );         // size
+                 }
+             }
+         } // end for vsegs in vspace
+
+         // check at least one matching vseg
+         if ( found == 0 )
+         {
+             _puts("\n[BOOT ERROR] in load_one_elf_file()\n");
+             _puts("vseg for loadable segment ");
+             _putx( seg_vaddr );
+             _puts(" in file ");
+             _puts( pathname );
+             _puts(" not found \n");
+             _exit();
          }
      }
- } // end for segments
+ } // end for loadable segments

      // close .elf file
…
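The non-DMA fallback must copy with physical addresses on both sides: the source buffer is identity mapped (its virtual address equals its physical address), while the destination vseg may sit in a remote cluster. A hedged sketch of what _physical_memcpy() has to achieve, not the actual GIET implementation (word_copy() is a hypothetical helper and assumes a word-aligned, word-multiple size; _physical_read()/_physical_write() are the accessors already used elsewhere in boot.c):

    static void word_copy( paddr_t dst, paddr_t src, unsigned int size )
    {
        unsigned int i;
        for ( i = 0 ; i < size ; i += 4 )
        {
            /* one 32-bit word at a time, through physical addressing */
            _physical_write( dst + i, _physical_read( src + i ) );
        }
    }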
- //////////////////////////////////////////////////////////////////////////////////
+ /////i///////////////////////////////////////////////////////////////////////////////
  // This function uses the map.bin data structure to load the "kernel.elf" file
- // as well as the various "application.elf" files.
- // The "preloader.elf" file is not loaded, because it has been burned in the ROM.
- // The "boot.elf" file is not loaded, because it has been loaded by the preloader.
+ // as well as the various "application.elf" files into memory.
+ // - The "preloader.elf" file is not loaded, because it has been burned in the ROM.
+ // - The "boot.elf" file is not loaded, because it has been loaded by the preloader.
  // This function scans all vobjs defined in the map.bin data structure to collect
- // all .elf files pathnames, and calls the load_one_elf_file() function to
- // load all loadable segments at the virtual address found in the .elf file.
- //////////////////////////////////////////////////////////////////////////////////
+ // all .elf files pathnames, and calls load_one_elf_file() for each .elf file.
+ // As the code can be replicated in several vsegs, the same code can be copied
+ // in one or several clusters by the load_one_elf_file() function.
+ //////////////////////////////////////////////////////////////////////////////////////
  void boot_elf_load()
  {
…
      }

-     load_one_elf_file( IOC_BOOT_MODE,
-                        vobj[vobj_id].binpath,
+     // Load the kernel
+     load_one_elf_file( 1,                          // kernel file
+                        vobj[vobj_id].binpath,      // file pathname
                         0 );                        // vspace 0

-     _puts("\n[BOOT] File \"");
-     _puts( vobj[vobj_id].binpath );
-     _puts("\" loaded at cycle ");
-     _putd( _get_proctime() );
-     _puts("\n");
-
-     // loop on the vspaces, scanning all vobjs in a vspace,
+     // loop on the vspaces, scanning all vobjs in the vspace,
      // to find the pathname of the .elf file associated to the vspace.
      for( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
      {
-         // Set PTPR depending on the vspace, as seg_data is defined in virtual space.
-         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
-
          // loop on the vobjs in vspace (vobj_id is the global index)
          unsigned int found = 0;
…
          }

-         load_one_elf_file( IOC_BOOT_MODE,
-                            vobj[vobj_id].binpath,
-                            vspace_id );
-
-         _puts("\n[BOOT] File \"");
-         _puts( vobj[vobj_id].binpath );
-         _puts("\" loaded at cycle ");
-         _putd( _get_proctime() );
-         _puts("\n");
+         load_one_elf_file( 0,                          // not a kernel file
+                            vobj[vobj_id].binpath,      // file pathname
+                            vspace_id );                // vspace index

      } // end for vspaces
-
-     // restaure vspace 0 PTPR
-     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0] >> 13) );

  } // end boot_elf_load()
…
      {
          unsigned int type     = periph[periph_id].type;
+         unsigned int subtype  = periph[periph_id].subtype;
          unsigned int channels = periph[periph_id].channels;
…
          case PERIPH_TYPE_IOC:    // vci_block_device component
          {
-             // initialize all channels except channel 0 because it has been
-             // initialized by the preloader.
-             for (channel_id = 1; channel_id < channels; channel_id++)
+             if ( subtype == PERIPH_SUBTYPE_BDV )
              {
-                 _ioc_init( channel_id );
+                 _bdv_lock.value = 0;
+ #if BOOT_DEBUG_PERI
+                 _puts("- BDV : channels = ");
+                 _putd(channels);
+                 _puts("\n");
+ #endif
              }
- #if BOOT_DEBUG_PERI
-             _puts("- IOC : channels = ");
-             _putd(channels);
-             _puts("\n");
- #endif
+             else if ( subtype == PERIPH_SUBTYPE_HBA )
+             {
+                 // TODO
+             }
+             else if ( subtype == PERIPH_SUBTYPE_SPI )
+             {
+                 // TODO
+             }
              break;
          }
…
          case PERIPH_TYPE_TTY:    // vci_multi_tty component
          {
-             // nothing to do
+             for (channel_id = 0; channel_id < channels; channel_id++)
+             {
+                 _tty_lock[channel_id].value = 0;
+                 _tty_rx_full[channel_id] = 0;
+             }
  #if BOOT_DEBUG_PERI
              _puts("- TTY : channels = ");
…
          case PERIPH_TYPE_IOB:    // vci_io_bridge component
          {
- #if BOOT_DEBUG_PERI
-             _puts("- IOB : channels = ");
-             _putd(channels);
-             _puts("\n");
- #endif
              if (GIET_USE_IOMMU)
              {
…
          case PERIPH_TYPE_PIC:    // vci_iopic component
          {
-
  #if BOOT_DEBUG_PERI
              _puts("- PIC : channels = ");
…
  // Most of this code is executed by Processor 0 only.
  /////////////////////////////////////////////////////////////////////////
  void boot_init()
  {
      mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
…
      _puts("\n");

-     // Loading the map.bin file into memory and checking it
+     // Load the map.bin file into memory and check it
      boot_mapping_init();
…
      _puts("\n");

-     // Building all page tables
+     // Build page tables
      boot_pt_init();

-     // Activating proc 0 MMU
-     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0] >> 13) );
+     // Activate MMU for proc [0,0,0]
+     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0][0][0] >> 13) );
      _set_mmu_mode( 0xF );
…
      _puts("\n");

-     // Initialising private vobjs in vspaces
+     // Initialise private vobjs in vspaces
      boot_vobjs_init();
…
      _puts("\n");

-     // Initializing schedulers
+     // Initialise schedulers
      boot_schedulers_init();
…
      _puts("\n");

-     // Setting CP0_SCHED register for proc 0
-     _set_sched( (unsigned int)_schedulers[0] );
-
-     // Initializing non replicated peripherals
+     // Set CP0_SCHED register for proc [0,0,0]
+     _set_sched( (unsigned int)_schedulers[0][0][0] );
+
+     // Initialise non replicated peripherals
      boot_peripherals_init();
…
      {
          unsigned int nprocs = cluster[clusterid].procs;
-         unsigned int xdest  = cluster[clusterid].x;
-         unsigned int ydest  = cluster[clusterid].y;
-         unsigned int cluster_xy = (xdest<<Y_WIDTH) + ydest;
+         unsigned int x = cluster[clusterid].x;
+         unsigned int y = cluster[clusterid].y;
+         unsigned int cluster_xy = (x<<Y_WIDTH) + y;

          for ( p = 0 ; p < nprocs; p++ )
…

      // all processors initialise SCHED register
-     _set_sched( (unsigned int)_schedulers[gpid] );
-
-     // all processors (but Proc 0) activate MMU
+     unsigned int cluster_xy = gpid / NB_PROCS_MAX;
+     unsigned int lpid       = gpid % NB_PROCS_MAX;
+     unsigned int x          = cluster_xy >> Y_WIDTH;
+     unsigned int y          = cluster_xy & ((1<<Y_WIDTH)-1);
+     _set_sched( (unsigned int)_schedulers[x][y][lpid] );
+
+     // all processors (but Proc[0,0,0]) activate MMU
      if ( gpid != 0 )
      {
-         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0] >> 13) );
+         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0][x][y] >> 13) );
          _set_mmu_mode( 0xF );
      }
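The recurring ">> 13" in every PTPR value follows from the PT1 geometry visible in boot_add_pte(): ix1 is 11 bits and PT1 entries are 4 bytes, so a PT1 occupies 2^11 * 4 = 8K bytes and the PTPR register can encode its base as paddr / 8K, provided every PTAB is 8K aligned (an assumption of this note, not stated in the changeset). A one-line check (illustration only):

    /* PT1 geometry: 2^11 entries of 4 bytes = 0x2000 bytes = 8K,
     * hence PTPR = (unsigned int)(pt1_pbase >> 13). */
    _Static_assert( (1 << 11) * 4 == (1 << 13), "PT1 occupies 8K" );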