Changeset 732
- Timestamp: Dec 3, 2015, 4:27:38 PM
- File: 1 edited
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with "+" (r732)
- Removed: lines prefixed with "-" (r726)
soft/giet_vm/giet_boot/boot.c
--- soft/giet_vm/giet_boot/boot.c (r726)
+++ soft/giet_vm/giet_boot/boot.c (r732)

@@ -31,5 +31,5 @@
 //    - the various "application.elf" files.
 //
-// 2) The GIET-VM uses the paged virtual memory to provide stwo services:
+// 2) The GIET-VM uses the paged virtual memory to provide two services:
 //    - classical memory protection, when several independant applications compiled
 //      in different virtual spaces are executing on the same hardware platform.

@@ -198,4 +198,10 @@
 // As each vseg is mapped by a different processor, the PT1 entry cannot
 // be concurrently accessed, and we don't need to take any lock.
+//
+// Implementation note:
+// This function checks that the PT1 entry is not already mapped,
+// to enforce the rule: only one vseg in a given BPP.
+// The 4 vsegs used by the boot code being packed in one single BPP,
+// this verif is not done for all identity mapping vsegs.
 //////////////////////////////////////////////////////////////////////////////
 void boot_add_pte1( unsigned int vspace_id,

@@ -204,13 +210,17 @@
                     unsigned int vpn,      // 20 bits right-justified
                     unsigned int flags,    // 10 bits left-justified
-                    unsigned int ppn )     // 28 bits right-justified
+                    unsigned int ppn,      // 28 bits right-justified
+                    unsigned int ident )   // identity mapping if non zero
 {
+    unsigned int  pte1;     // PTE1 value
+    paddr_t       paddr;    // PTE1 physical address
+
     // compute index in PT1
     unsigned int ix1 = vpn >> 9;        // 11 bits for ix1
 
-    // get page table physical base address
-    paddr_t pt1_pbase = _ptabs_paddr[vspace_id][x][y];
-
-    if ( pt1_pbase == 0 )
+    // get PT1 physical base address
+    paddr_t pt1_base = _ptabs_paddr[vspace_id][x][y];
+
+    if ( pt1_base == 0 )
     {
         _printf("\n[BOOT ERROR] in boot_add_pte1() : no PTAB in cluster[%d,%d]"

@@ -219,11 +229,23 @@
     }
 
+    // compute pte1 physical address
+    paddr = pt1_base + 4*ix1;
+
+    // check PTE1 not already mapped
+    if ( ident == 0 )
+    {
+        if ( _physical_read( paddr ) & PTE_V )
+        {
+            _printf("\n[BOOT ERROR] in boot_add_pte1() : vpn %x already mapped "
+                    "in PTAB[%d,%d] for vspace %d\n", vpn , x , y , vspace_id );
+            _exit();
+        }
+    }
+
     // compute pte1 : 2 bits V T / 8 bits flags / 3 bits RSVD / 19 bits bppi
-    unsigned int pte1 = PTE_V |
-                        (flags & 0x3FC00000) |
-                        ((ppn>>9) & 0x0007FFFF);
+    pte1 = PTE_V | (flags & 0x3FC00000) | ((ppn>>9) & 0x0007FFFF);
 
     // write pte1 in PT1
-    _physical_write( pt1_pbase + 4*ix1, pte1 );
+    _physical_write( paddr , pte1 );
 
     asm volatile ("sync");

@@ -238,5 +260,5 @@
 // allocation), this function checks a possible overflow of the PT2 array.
 // As a given entry in PT1 can be shared by several vsegs, mapped by
-// different processors, we need to take the lock protecting PTAB[v][x]y].
+// different processors, we need to take the lock protecting PTAB[v][x][y].
 //////////////////////////////////////////////////////////////////////////////
 void boot_add_pte2( unsigned int vspace_id,
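Aside (not part of the changeset): the PTE1 format named in the comment above (2 bits V/T, 8 bits of flags, 3 reserved bits, 19 bits of big-page index) can be exercised with a small standalone program. PTE_V and the sample flag/ppn values below are assumptions chosen for illustration; the real constants are defined in the GIET-VM headers, not in this diff.

#include <stdio.h>

/* Assumed valid bit (MSB), for illustration only; the real PTE_V comes
 * from the GIET-VM headers. */
#define PTE_V 0x80000000

/* Build a PTE1 the same way r732 does: keep the 8 hardware flag bits
 * (left-justified in bits 29..22) and the 19-bit big-page index (ppn >> 9). */
static unsigned int make_pte1( unsigned int flags,   /* 10 bits left-justified  */
                               unsigned int ppn )    /* 28 bits right-justified */
{
    return PTE_V | (flags & 0x3FC00000) | ((ppn >> 9) & 0x0007FFFF);
}

int main( void )
{
    unsigned int ppn   = 0x0123456;    /* example physical page number */
    unsigned int flags = 0x3FC00000;   /* example: all flag bits set   */

    printf( "pte1 = 0x%08X\n", make_pte1( flags, ppn ) );
    return 0;
}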
@@ -245,5 +267,6 @@
                     unsigned int vpn,      // 20 bits right-justified
                     unsigned int flags,    // 10 bits left-justified
-                    unsigned int ppn )     // 28 bits right-justified
+                    unsigned int ppn,      // 28 bits right-justified
+                    unsigned int ident )   // identity mapping if non zero
 {
     unsigned int ix1;

@@ -331,10 +354,10 @@
 //
 // A given vseg can be mapped in a Big Physical Pages (BPP: 2 Mbytes) or in a
-// Small Physical Pages (SPP: 4 Kbytes), depending on the "big" attribute of vseg,
-// with the following rules:
-// - SPP : There is only one vseg in a small physical page, but a single vseg
-//         can cover several contiguous small physical pages.
-// - BPP : It can exist several vsegs in a single big physical page, and a single
-//         vseg can cover several contiguous big physical pages.
+// Small Physical Pages (SPP: 4 Kbytes), depending on the "big" attribute of vseg.
+//
+// All boot vsegs are packed in a single BPP (2 Mbytes). For all other vsegs,
+// there is only one vseg in a given page (BPP or SPP), but a single vseg can
+// cover several contiguous physical pages.
+// Only the vsegs used by the boot code can be identity mapping.
 //
 // 1) First step: it computes various vseg attributes and checks

@@ -343,15 +366,11 @@
 // 2) Second step: it allocates the required number of contiguous physical pages,
 //    computes the physical base address (if the vseg is not identity mapping),
-//    and register it in the vseg pbase field.
-//    Only the vsegs used by the boot code and the peripheral vsegs
-//    can be identity mapping. The first big physical page in cluster[0,0]
-//    is reserved for the boot vsegs.
+//    register it in the vseg pbase field, and update the page table(s).
 //
-// 3) Third step (only for vseg that have the VSEG_TYPE_PTAB): the M page tables
-//    associated to the M vspaces must be packed in the same vseg.
+// 3) Third step (only for vseg that have the VSEG_TYPE_PTAB): for a given cluster,
+//    the M page tables associated to the M vspaces are packed in the same vseg.
 //    We divide this vseg in M sub-segments, and compute the vbase and pbase
 //    addresses for M page tables, and register these addresses in the _ptabs_paddr
 //    and _ptabs_vaddr arrays.
-//
 /////////////////////////////////////////////////////////////////////////////////////
 void boot_vseg_map( mapping_vseg_t* vseg,
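Aside (not part of the changeset): under the simpler r732 rules, the "second step" described in the header above reduces to the logic sketched below. The allocators are toy stand-ins, not the real _get_small_ppn()/_get_big_ppn() of the boot allocator; only the ident/big decision and the pbase computation mirror the code shown further down in this diff.

#include <stdio.h>

typedef unsigned long long paddr_t;

/* Toy allocators standing in for _get_small_ppn()/_get_big_ppn():
 * they just hand out increasing page numbers. */
static unsigned int next_spp = 0x00001000;
static unsigned int next_bpp = 0x00080000;

static unsigned int toy_get_small_ppn( unsigned int npages )
{
    unsigned int ppn = next_spp;
    next_spp += npages;              /* npages small (4 Kbytes) pages      */
    return ppn;
}

static unsigned int toy_get_big_ppn( unsigned int npages )
{
    unsigned int ppn = next_bpp;
    next_bpp += npages << 9;         /* one big page (2 Mbytes) = 512 SPPs */
    return ppn;
}

/* Second step, as described above: an identity-mapped vseg needs no
 * allocation (ppn == vpn, boot vsegs only); otherwise allocate contiguous
 * BPPs or SPPs depending on the "big" attribute. */
static unsigned int compute_ppn( unsigned int ident, unsigned int big,
                                 unsigned int vpn,   unsigned int npages )
{
    if ( ident ) return vpn;
    return big ? toy_get_big_ppn( npages ) : toy_get_small_ppn( npages );
}

int main( void )
{
    unsigned int ppn   = compute_ppn( 0, 1, 0x40000, 1 );
    paddr_t      pbase = ((paddr_t)ppn) << 12;   /* vseg->pbase in the real code */

    printf( "ppn = 0x%x  pbase = 0x%llx\n", ppn, pbase );
    return 0;
}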
@@ -414,5 +433,5 @@
 
     // compute ppn
-    if ( vseg->ident )           // identity mapping
+    if ( vseg->ident )           // identity mapping : no memory allocation required
     {
         ppn = vpn;

@@ -425,49 +444,11 @@
         pmem_alloc_t* palloc = &boot_pmem_alloc[x_dest][y_dest];
 
-        if ( big == 0 )              // SPP : small physical pages
+        if ( big == 0 )              // allocate contiguous SPPs
         {
-            // allocate contiguous small physical pages
             ppn = _get_small_ppn( palloc, npages );
         }
-        else                         // BPP : big physical pages
+        else                         // allocate contiguous BPPs
         {
-
-            // one big page can be shared by several vsegs
-            // we must chek if BPP already allocated
-            if ( is_ptab )   // It cannot be mapped
-            {
-                ppn = _get_big_ppn( palloc, npages );
-            }
-            else             // It can be mapped
-            {
-                unsigned int ix1   = vpn >> 9;   // 11 bits
-                paddr_t      paddr = _ptabs_paddr[vsid][x_dest][y_dest] + (ix1<<2);
-                unsigned int pte1  = _physical_read( paddr );
-
-                if ( (pte1 & PTE_V) == 0 )     // BPP not allocated yet
-                {
-                    // allocate contiguous big physical pages
-                    ppn = _get_big_ppn( palloc, npages );
-                }
-                else                           // BPP already allocated
-                {
-                    // test if new vseg has the same mode bits than
-                    // the other vsegs in the same big page
-                    unsigned int pte1_mode = 0;
-                    if (pte1 & PTE_C) pte1_mode |= C_MODE_MASK;
-                    if (pte1 & PTE_X) pte1_mode |= X_MODE_MASK;
-                    if (pte1 & PTE_W) pte1_mode |= W_MODE_MASK;
-                    if (pte1 & PTE_U) pte1_mode |= U_MODE_MASK;
-                    if (vseg->mode != pte1_mode)
-                    {
-                        _printf("\n[BOOT ERROR] in boot_vseg_map() : "
-                                "vseg %s has different flags than another vseg "
-                                "in the same BPP\n", vseg->name );
-                        _exit();
-                    }
-                    ppn = ((pte1 << 9) & 0x0FFFFE00);
-                }
-            }
-            ppn = ppn | (vpn & 0x1FF);
+            ppn = _get_big_ppn( palloc, npages );
         }
     }

@@ -478,8 +459,7 @@
     }
 
-    // update vseg.pbase field and update vsegs chaining
+    // update vseg.pbase field and register vseg mapped
     vseg->pbase  = ((paddr_t)ppn) << 12;
     vseg->mapped = 1;
-
 
     //////////// Third step : (only if the vseg is a page table)

@@ -611,5 +591,6 @@
                                vpn + (p<<9),
                                flags,
-                               ppn + (p<<9) );
+                               ppn + (p<<9),
+                               vseg->ident );
             }
             else        // small pages => PTE2s

@@ -620,5 +601,6 @@
                                vpn + p,
                                flags,
-                               ppn + p );
+                               ppn + p,
+                               vseg->ident );
             }
         }

@@ -638,5 +620,6 @@
                                vpn + (p<<9),
                                flags,
-                               ppn + (p<<9) );
+                               ppn + (p<<9),
+                               vseg->ident );
             }
             else        // small pages => PTE2s

@@ -647,5 +630,6 @@
                                vpn + p,
                                flags,
-                               ppn + p );
+                               ppn + p,
+                               vseg->ident );
             }
         }

@@ -664,5 +648,6 @@
                                vpn + (p<<9),
                                flags,
-                               ppn + (p<<9) );
+                               ppn + (p<<9),
+                               vseg->ident );
             }
             else        // small pages = PTE2s

@@ -673,5 +658,6 @@
                                vpn + p,
                                flags,
-                               ppn + p );
+                               ppn + p,
+                               vseg->ident );
             }
         }

@@ -694,5 +680,6 @@
                                vpn + (p<<9),
                                flags,
-                               ppn + (p<<9) );
+                               ppn + (p<<9),
+                               vseg->ident );
             }
             else        // small pages -> PTE2s

@@ -703,5 +690,6 @@
                                vpn + p,
                                flags,
-                               ppn + p );
+                               ppn + p,
+                               vseg->ident );
             }
         }

@@ -1337,5 +1325,5 @@
                     psched->context[ltid].slot[CTX_VSID_ID],
                     psched->context[ltid].slot[CTX_NORUN_ID],
-                    psched->context[ltid].slot[CTX_SIG_ID] );
+                    psched->context[ltid].slot[CTX_SIGS_ID] );
 #endif
         }  // end if FIT
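Aside (not part of the changeset): every call site above now forwards vseg->ident as the new last argument of boot_add_pte1()/boot_add_pte2(). The toy program below (PT1 is simulated as an in-memory array and PTE_V is an assumed constant, for illustration only) shows the effect described in the implementation note near the top of this diff: rewriting an already valid PT1 entry is only tolerated for identity-mapped boot vsegs, which are packed in a single BPP; any other duplicate mapping is a boot error.

#include <stdio.h>
#include <stdlib.h>

#define PTE_V 0x80000000          /* assumed valid bit, for illustration */

static unsigned int pt1[2048];    /* simulated PT1 (2048 PTE1 entries)   */

/* Simplified model of the r732 check in boot_add_pte1(): refuse to remap
 * an already valid PTE1 unless the vseg is identity mapped. */
static void add_pte1( unsigned int vpn, unsigned int pte1, unsigned int ident )
{
    unsigned int ix1 = vpn >> 9;                 /* 11-bit index in PT1 */

    if ( (ident == 0) && (pt1[ix1] & PTE_V) )    /* one vseg per BPP    */
    {
        printf( "[BOOT ERROR] vpn %x already mapped\n", vpn );
        exit( 1 );
    }
    pt1[ix1] = pte1;
}

int main( void )
{
    add_pte1( 0x40000, PTE_V | 0x123, 0 );   /* first mapping: accepted            */
    add_pte1( 0x40000, PTE_V | 0x123, 1 );   /* identity-mapped boot vseg: allowed */
    add_pte1( 0x40100, PTE_V | 0x456, 0 );   /* same PT1 entry, ident == 0: error  */
    return 0;
}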
@@ -1897,5 +1885,5 @@
     boot_ptab_extend();
 
-    _printf("\n[BOOT] Physical memory allocators and page tables"
+    _printf("\n[BOOT] Page tables"
             " initialized at cycle %d\n", _get_proctime() );
 }