Changeset 757 for soft/giet_vm/giet_boot/boot.c
- Timestamp: Jan 18, 2016, 4:39:47 PM
- File: soft/giet_vm/giet_boot/boot.c (1 edited)
Legend: unchanged lines are shown with a leading space, lines removed in r757 with a leading "-", and lines added in r757 with a leading "+".
soft/giet_vm/giet_boot/boot.c
--- soft/giet_vm/giet_boot/boot.c (r742)
+++ soft/giet_vm/giet_boot/boot.c (r757)

@@ -133,9 +133,9 @@
 // Temporaty buffer used to load one complete .elf file
 __attribute__((section(".kdata")))
-unsigned char _boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(64)));
+unsigned char _boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(64)));

 // Physical memory allocators array (one per cluster)
 __attribute__((section(".kdata")))
-pmem_alloc_t  boot_pmem_alloc[X_SIZE][Y_SIZE];
+pmem_alloc_t  _boot_pmem_alloc[X_SIZE][Y_SIZE];

 // Schedulers virtual base addresses array (one per processor)

@@ -149,5 +149,5 @@
 // Page tables physical base addresses (one per vspace and per cluster)
 __attribute__((section(".kdata")))
-paddr_t            _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
+unsigned long long _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];

 // Page tables pt2 allocators (one per vspace and per cluster)

@@ -191,146 +191,4 @@

 extern void boot_entry();
-
-//////////////////////////////////////////////////////////////////////////////
-// This function registers a new PTE1 in the page table defined
-// by the vspace_id argument, and the (x,y) coordinates.
-// It updates only the first level PT1.
-// As each vseg is mapped by a different processor, the PT1 entry cannot
-// be concurrently accessed, and we don't need to take any lock.
-//
-// Implementation note:
-// This function checks that the PT1 entry is not already mapped,
-// to enforce the rule: only one vseg in a given BPP.
-// The 4 vsegs used by the boot code being packed in one single BPP,
-// this verif is not done for all identity mapping vsegs.
-//////////////////////////////////////////////////////////////////////////////
-void boot_add_pte1( unsigned int vspace_id,
-                    unsigned int x,
-                    unsigned int y,
-                    unsigned int vpn,        // 20 bits right-justified
-                    unsigned int flags,      // 10 bits left-justified
-                    unsigned int ppn,        // 28 bits right-justified
-                    unsigned int ident )     // identity mapping if non zero
-{
-    unsigned int pte1;    // PTE1 value
-    paddr_t      paddr;   // PTE1 physical address
-
-    // compute index in PT1
-    unsigned int ix1 = vpn >> 9;    // 11 bits for ix1
-
-    // get PT1 physical base address
-    paddr_t pt1_base = _ptabs_paddr[vspace_id][x][y];
-
-    if ( pt1_base == 0 )
-    {
-        _printf("\n[BOOT ERROR] in boot_add_pte1() : no PTAB in cluster[%d,%d]"
-                " containing processors\n", x , y );
-        _exit();
-    }
-
-    // compute pte1 physical address
-    paddr = pt1_base + 4*ix1;
-
-    // check PTE1 not already mapped
-    if ( ident == 0 )
-    {
-        if ( _physical_read( paddr ) & PTE_V )
-        {
-            _printf("\n[BOOT ERROR] in boot_add_pte1() : vpn %x already mapped "
-                    "in PTAB[%d,%d] for vspace %d\n", vpn , x , y , vspace_id );
-            _exit();
-        }
-    }
-
-    // compute pte1 : 2 bits V T / 8 bits flags / 3 bits RSVD / 19 bits bppi
-    pte1 = PTE_V | (flags & 0x3FC00000) | ((ppn>>9) & 0x0007FFFF);
-
-    // write pte1 in PT1
-    _physical_write( paddr , pte1 );
-
-    asm volatile ("sync");
-
-}  // end boot_add_pte1()
-
-//////////////////////////////////////////////////////////////////////////////
-// This function registers a new PTE2 in the page table defined
-// by the vspace_id argument, and the (x,y) coordinates.
-// It updates both the first level PT1 and the second level PT2.
-// As the set of PT2s is implemented as a fixed size array (no dynamic
-// allocation), this function checks a possible overflow of the PT2 array.
-// As a given entry in PT1 can be shared by several vsegs, mapped by
-// different processors, we need to take the lock protecting PTAB[v][x][y].
-//////////////////////////////////////////////////////////////////////////////
-void boot_add_pte2( unsigned int vspace_id,
-                    unsigned int x,
-                    unsigned int y,
-                    unsigned int vpn,        // 20 bits right-justified
-                    unsigned int flags,      // 10 bits left-justified
-                    unsigned int ppn,        // 28 bits right-justified
-                    unsigned int ident )     // identity mapping if non zero
-{
-    unsigned int ix1;
-    unsigned int ix2;
-    paddr_t      pt2_pbase;     // PT2 physical base address
-    paddr_t      pte2_paddr;    // PTE2 physical address
-    unsigned int pt2_id;        // PT2 index
-    unsigned int ptd;           // PTD : entry in PT1
-
-    ix1 = vpn >> 9;             // 11 bits for ix1
-    ix2 = vpn & 0x1FF;          //  9 bits for ix2
-
-    // get page table physical base address
-    paddr_t pt1_pbase = _ptabs_paddr[vspace_id][x][y];
-
-    if ( pt1_pbase == 0 )
-    {
-        _printf("\n[BOOT ERROR] in boot_add_pte2() : no PTAB for vspace %d "
-                "in cluster[%d,%d]\n", vspace_id , x , y );
-        _exit();
-    }
-
-    // get lock protecting PTAB[vspace_id][x][y]
-    _spin_lock_acquire( &_ptabs_spin_lock[vspace_id][x][y] );
-
-    // get ptd in PT1
-    ptd = _physical_read( pt1_pbase + 4 * ix1 );
-
-    if ((ptd & PTE_V) == 0)    // undefined PTD: compute PT2 base address,
-                               // and set a new PTD in PT1
-    {
-        // get a new pt2_id
-        pt2_id = _ptabs_next_pt2[vspace_id][x][y];
-        _ptabs_next_pt2[vspace_id][x][y] = pt2_id + 1;
-
-        // check overflow
-        if (pt2_id == _ptabs_max_pt2)
-        {
-            _printf("\n[BOOT ERROR] in boot_add_pte2() : PTAB[%d,%d,%d]"
-                    " contains not enough PT2s\n", vspace_id, x, y );
-            _exit();
-        }
-
-        pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
-        ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
-
-        // set PTD into PT1
-        _physical_write( pt1_pbase + 4*ix1, ptd);
-    }
-    else                       // valid PTD: compute PT2 base address
-    {
-        pt2_pbase = ((paddr_t)(ptd & 0x0FFFFFFF)) << 12;
-    }
-
-    // set PTE in PT2 : flags & PPN in two 32 bits words
-    pte2_paddr = pt2_pbase + 8 * ix2;
-    _physical_write(pte2_paddr     , (PTE_V | flags) );
-    _physical_write(pte2_paddr + 4 , ppn );
-
-    // release lock protecting PTAB[vspace_id][x][y]
-    _spin_lock_release( &_ptabs_spin_lock[vspace_id][x][y] );
-
-    asm volatile ("sync");
-
-}  // end boot_add_pte2()
-
+
 ////////////////////////////////////////////////////////////////////////////////////
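The two helpers removed above build the first-level (PTE1) and second-level (PTE2) page table entries; their call sites later in the file are switched to _v2p_add_pte1() / _v2p_add_pte2(), presumably equivalent helpers now defined outside boot.c. As a reminder of the entry formats described in the removed comments (a PTE1 packs "2 bits V T / 8 bits flags / 3 bits RSVD / 19 bits bppi"; a PTE2 is two 32-bit words), here is a small standalone sketch. The PTE_V / PTE_T bit positions and the sample vpn / ppn / flags values are assumptions for illustration, not taken from this changeset.

/* Standalone sketch (not part of boot.c): PTE1 / PTE2 encoding as performed
 * by the removed helpers. PTE_V and PTE_T are assumed to occupy bits 31 and
 * 30, matching the "2 bits V T" comment; all sample values are illustrative. */
#include <stdio.h>

#define PTE_V 0x80000000    /* valid bit (assumed position) */
#define PTE_T 0x40000000    /* type bit : PTD (assumed position) */

int main( void )
{
    unsigned int vpn   = 0x00040;      /* virtual page number  (20 bits) */
    unsigned int ppn   = 0x0100200;    /* physical page number (28 bits) */
    unsigned int flags = 0x0C400000;   /* 10 flag bits, left-justified   */

    /* PT1 / PT2 indices : 11-bit ix1 and 9-bit ix2 */
    unsigned int ix1 = vpn >> 9;
    unsigned int ix2 = vpn & 0x1FF;

    /* PTE1 : V bit, 8 flag bits, 19-bit big physical page index (ppn >> 9) */
    unsigned int pte1 = PTE_V | (flags & 0x3FC00000) | ((ppn >> 9) & 0x0007FFFF);

    /* PTE2 : two 32-bit words, flags first, then the full 28-bit ppn */
    unsigned int pte2_flags = PTE_V | flags;
    unsigned int pte2_ppn   = ppn;

    printf("ix1 = %x   ix2 = %x\n", ix1, ix2);
    printf("pte1 = %08x\n", pte1);
    printf("pte2 = %08x / %08x\n", pte2_flags, pte2_ppn);
    return 0;
}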
@@ -442,5 +300,5 @@
 {
     // compute pointer on physical memory allocator in dest cluster
-    pmem_alloc_t* palloc = &boot_pmem_alloc[x_dest][y_dest];
+    pmem_alloc_t* palloc = &_boot_pmem_alloc[x_dest][y_dest];

     if ( big == 0 )              // allocate contiguous SPPs

@@ -480,10 +338,11 @@
     offset = 0;

-    // each PTAB must be aligned on a 8 Kbytes boundary
+    // compute max_pt2: each PTAB must be aligned on a 8 Kbytes boundary
     nsp = ( vseg->length >> 12 ) / nspaces;
     if ( (nsp & 0x1) == 0x1 ) nsp = nsp - 1;
-
-    // compute max_pt2
    _ptabs_max_pt2 = ((nsp<<12) - PT1_SIZE) / PT2_SIZE;
+
+    // save max_pt2 in header
+    header->max_pt2 = _ptabs_max_pt2;

     for ( vs = 0 ; vs < nspaces ; vs++ )
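To make the max_pt2 computation above concrete, the sketch below runs the same arithmetic on example numbers. PT1_SIZE = 8 Kbytes and PT2_SIZE = 4 Kbytes are assumptions consistent with the 11-bit ix1 / 9-bit ix2 indices used by the page table code (2048 four-byte PTE1s and 512 eight-byte PTE2s); the PTAB vseg length and the number of vspaces are invented for the example.

/* Standalone sketch: number of PT2s that fit in one per-cluster page table.
 * PT1_SIZE and PT2_SIZE are assumed values (see above); the vseg length
 * and nspaces are example inputs. */
#include <stdio.h>

#define PT1_SIZE 8192    /* 2048 PTE1 of 4 bytes (assumed) */
#define PT2_SIZE 4096    /*  512 PTE2 of 8 bytes (assumed) */

int main( void )
{
    unsigned int length  = 0x100000;   /* example PTAB vseg length : 1 Mbyte */
    unsigned int nspaces = 4;          /* example number of vspaces          */

    /* small pages per vspace, forced to an even value so that each
       page table stays aligned on an 8 Kbytes boundary */
    unsigned int nsp = ( length >> 12 ) / nspaces;
    if ( (nsp & 0x1) == 0x1 ) nsp = nsp - 1;

    /* space left after PT1 is divided into fixed-size PT2s */
    unsigned int max_pt2 = ((nsp << 12) - PT1_SIZE) / PT2_SIZE;

    printf("nsp = %u pages   max_pt2 = %u\n", nsp, max_pt2);   /* 64 and 62 */
    return 0;
}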
@@ -586,5 +445,5 @@
     if ( big )            // big pages => PTE1s
     {
-        boot_add_pte1( vsid,
+        _v2p_add_pte1( vsid,
                        x_dest,
                        y_dest,

@@ -596,5 +455,5 @@
     else                  // small pages => PTE2s
     {
-        boot_add_pte2( vsid,
+        _v2p_add_pte2( vsid,
                        x_dest,
                        y_dest,

@@ -615,5 +474,5 @@
     if ( big )            // big pages => PTE1s
     {
-        boot_add_pte1( vsid,
+        _v2p_add_pte1( vsid,
                        x,
                        y,

@@ -625,5 +484,5 @@
     else                  // small pages => PTE2s
     {
-        boot_add_pte2( vsid,
+        _v2p_add_pte2( vsid,
                        x,
                        y,

@@ -643,5 +502,5 @@
     if ( big )            // big pages => PTE1s
     {
-        boot_add_pte1( v,
+        _v2p_add_pte1( v,
                        x_dest,
                        y_dest,

@@ -653,5 +512,5 @@
     else                  // small pages = PTE2s
     {
-        boot_add_pte2( v,
+        _v2p_add_pte2( v,
                        x_dest,
                        y_dest,

@@ -675,5 +534,5 @@
     if ( big )            // big pages => PTE1s
     {
-        boot_add_pte1( v,
+        _v2p_add_pte1( v,
                        x,
                        y,

@@ -685,5 +544,5 @@
     else                  // small pages -> PTE2s
     {
-        boot_add_pte2( v,
+        _v2p_add_pte2( v,
                        x,
                        y,

@@ -830,10 +689,18 @@
           vseg_id++)
     {
-        pseg    = _get_pseg_base(header) + vseg[vseg_id].psegid;
-        cluster = _get_cluster_base(header) + pseg->clusterid;
-        if ( (cluster->x == cx) && (cluster->y == cy) )
-        {
-            boot_vseg_map( &vseg[vseg_id], vspace_id );
-            boot_vseg_pte( &vseg[vseg_id], vspace_id );
+        if ( vseg[vseg_id].type == VSEG_TYPE_MMAP )   // no static mapping
+        {
+            // psegid used as page allocator in MMAP vseg
+            vseg[vseg_id].psegid = 0;
+        }
+        else                                          // static mapping
+        {
+            pseg    = _get_pseg_base(header) + vseg[vseg_id].psegid;
+            cluster = _get_cluster_base(header) + pseg->clusterid;
+            if ( (cluster->x == cx) && (cluster->y == cy) )
+            {
+                boot_vseg_map( &vseg[vseg_id], vspace_id );
+                boot_vseg_pte( &vseg[vseg_id], vspace_id );
+            }
         }
     }

@@ -1074,4 +941,5 @@
     psched->context[IDLE_THREAD_INDEX].slot[CTX_PTPR_ID] = _ptabs_paddr[0][x][y]>>13;
     psched->context[IDLE_THREAD_INDEX].slot[CTX_PTAB_ID] = _ptabs_vaddr[0][x][y];
+    psched->context[IDLE_THREAD_INDEX].slot[CTX_NPT2_ID] = _ptabs_next_pt2[0][x][y];
     psched->context[IDLE_THREAD_INDEX].slot[CTX_TTY_ID]  = 0;
     psched->context[IDLE_THREAD_INDEX].slot[CTX_LTID_ID] = IDLE_THREAD_INDEX;

@@ -1245,4 +1113,7 @@
     unsigned int ctx_ptab = _ptabs_vaddr[vspace_id][req_x][req_y];

+    // ctx_npt2 : page_table PT2 allocator
+    unsigned int ctx_npt2 = _ptabs_next_pt2[vspace_id][req_x][req_y];
+
     // ctx_entry : Get the virtual address of the memory location containing
     // the thread entry point : the start_vector is stored by GCC in the

@@ -1282,4 +1153,5 @@
     psched->context[ltid].slot[CTX_PTPR_ID]  = ctx_ptpr;
     psched->context[ltid].slot[CTX_PTAB_ID]  = ctx_ptab;
+    psched->context[ltid].slot[CTX_NPT2_ID]  = ctx_npt2;
     psched->context[ltid].slot[CTX_LTID_ID]  = ltid;
     psched->context[ltid].slot[CTX_TRDID_ID] = ctx_trdid;

@@ -1310,4 +1182,5 @@
         " - ctx[PTPR] = %x\n"
         " - ctx[PTAB] = %x\n"
+        " - ctx[NPT2] = %x\n"
         " - ctx[VSID] = %d\n"
         " - ctx[NORUN] = %x\n"

@@ -1323,4 +1196,5 @@
         psched->context[ltid].slot[CTX_PTPR_ID],
         psched->context[ltid].slot[CTX_PTAB_ID],
+        psched->context[ltid].slot[CTX_NPT2_ID],
         psched->context[ltid].slot[CTX_VSID_ID],
         psched->context[ltid].slot[CTX_NORUN_ID],

@@ -1758,4 +1632,5 @@
     unsigned int pseg_min = cluster[cluster_id].pseg_offset;
     unsigned int pseg_max = pseg_min + cluster[cluster_id].psegs;
+
     for ( pseg_id = pseg_min ; pseg_id < pseg_max ; pseg_id++ )
     {

@@ -1831,5 +1706,5 @@
     mapping_cluster_t* cluster = _get_cluster_base(header);

-    _printf("\n[BOOT] Mapping %s loaded at cycle %d\n",
+    _printf("\n[BOOT] Mapping %s at cycle %d\n",
             header->name , _get_proctime() );

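The new VSEG_TYPE_MMAP branch resets psegid at boot so that, as the added comment says, the field can later serve as a page allocator inside an MMAP vseg. The sketch below shows one way such a bump allocator could look; the helper name, the return convention and the exact mapping_vseg_t fields used are assumptions for illustration, not code from this changeset or from the GIET kernel.

#include <mapping_info.h>    /* mapping_vseg_t (assumed header and field names) */

/* Hypothetical sketch: allocate the next free 4 Kbytes page of an MMAP vseg,
 * using psegid (reset to 0 by the boot loader above) as a bump counter. */
unsigned int _mmap_alloc_page( mapping_vseg_t* vseg )
{
    unsigned int page_id = vseg->psegid;          /* next free page index  */

    if ( ((page_id + 1) << 12) > vseg->length )   /* vseg exhausted        */
        return 0xFFFFFFFF;

    vseg->psegid = page_id + 1;                   /* bump the allocator    */

    return (vseg->vbase >> 12) + page_id;         /* vpn of allocated page */
}

Nothing in the changeset requires this exact interface; it only illustrates why resetting psegid to 0 is enough to initialise the allocator.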