Changeset 116
- Timestamp: Jun 30, 2017, 5:18:13 PM
- Location: trunk/hal/x86_64/core
- Files: 4 edited
trunk/hal/x86_64/core/hal_gpt.c (r114 → r116)

  void hal_gpt_bootstrap_reset()
  {
-     // XXX: will be revisited later
-     // size_t npages = (va_avail - (CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE)) / PAGE_SIZE;
-     // hal_gpt_leave_range(CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE, npages);
-     // va_avail = CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE;
+     size_t npages = (va_avail - (CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE)) / PAGE_SIZE;
+     hal_gpt_leave_range(CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE, npages);
+     va_avail = CLUSTER_MIN_VA(0) + KERNEL_VA_SIZE;
+ }
+
+ /*
+  * Uniformize the PA and VA offsets, and return the value. After this function,
+  * we are guaranteed to have [VA = PA + constant_offset]. And therefore we can
+  * only call hal_gpt_bootstrap_valloc, without entering it in a PA.
+  */
+ size_t hal_gpt_bootstrap_uniformize()
+ {
+     size_t pa_offset = pa_avail - 0;
+     size_t va_offset = va_avail - CLUSTER_MIN_VA(0);
+
+     if (pa_offset < va_offset)
+         pa_avail += (va_offset - pa_offset);
+     else if (pa_offset > va_offset)
+         va_avail += (pa_offset - va_offset);
+
+     return MAX(pa_offset, va_offset);
  }

…

      XASSERT(va % PAGE_SIZE == 0);
      XASSERT(pa % PAGE_SIZE == 0);
-     XASSERT(va == tmpva || PTE_BASE[pl1_i(va)] == 0);
+     //XASSERT(va == tmpva || PTE_BASE[pl1_i(va)] == 0);
      PTE_BASE[pl1_i(va)] = (pa & PG_FRAME) | flags;
      invlpg(va);

…

      /* Manually enter cluster0's heap */
-     /*
      hal_gpt_enter_range(CLUSTER_MIN_VA(0) + kimg_size, kimg_max_pa,
          (CLUSTER_VA_SIZE - kimg_size) / PAGE_SIZE);
-     */
  }
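The new hal_gpt_bootstrap_uniformize() pads whichever bootstrap bump allocator is behind (pa_avail or va_avail) until both have consumed the same number of bytes, so that every later bootstrap allocation satisfies VA = PA + constant_offset. The following stand-alone sketch reproduces only that arithmetic; the variables and the example addresses are hypothetical stand-ins for the kernel's globals, not the real symbols.

    /* Sketch of the offset-uniformization arithmetic. 'cluster_min_va',
     * 'pa_avail' and 'va_avail' are illustrative stand-ins, not the
     * kernel's real symbols or values. */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static uint64_t pa_avail = 0x0000000000250000ULL;          /* next free PA */
    static uint64_t va_avail = 0xffff800000180000ULL;          /* next free VA */
    static const uint64_t cluster_min_va = 0xffff800000000000ULL;

    static uint64_t uniformize(void)
    {
        uint64_t pa_offset = pa_avail - 0;
        uint64_t va_offset = va_avail - cluster_min_va;

        /* Pad the allocator that lags behind so both offsets match. */
        if (pa_offset < va_offset)
            pa_avail += va_offset - pa_offset;
        else if (pa_offset > va_offset)
            va_avail += pa_offset - va_offset;

        return MAX(pa_offset, va_offset);
    }

    int main(void)
    {
        uint64_t off = uniformize();
        /* After the call, va_avail - cluster_min_va == pa_avail - 0. */
        printf("offset = %#llx, VA - PA = %#llx\n",
               (unsigned long long)off,
               (unsigned long long)(va_avail - pa_avail));
        return 0;
    }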
trunk/hal/x86_64/core/hal_init.c (r114 → r116)

  }

+ static size_t init_bootinfo_rsvd(boot_rsvd_t *rsvd)
+ {
+     size_t mmap_length = mb_info.mi_mmap_length;
+     uint8_t *mmap_addr = (uint8_t *)&mb_mmap;
+     size_t i, rsvd_nr;
+
+     memset(rsvd, 0, sizeof(boot_rsvd_t));
+
+     i = 0, rsvd_nr = 0;
+     while (i < mmap_length) {
+         struct multiboot_mmap *mm;
+
+         mm = (struct multiboot_mmap *)(mmap_addr + i);
+
+         rsvd[rsvd_nr].first_page =
+             rounddown(mm->mm_base_addr, PAGE_SIZE) / PAGE_SIZE;
+         rsvd[rsvd_nr].npages =
+             roundup(mm->mm_length, PAGE_SIZE) / PAGE_SIZE;
+         rsvd_nr++;
+
+         if (rsvd_nr == CONFIG_PPM_MAX_RSVD)
+             x86_panic("too many memory holes");
+
+         i += mm->mm_size + 4;
+     }
+
+     return rsvd_nr;
+ }
+
  static void init_bootinfo_core(boot_core_t *core)

…

  static void init_bootinfo(boot_info_t *info)
  {
-     extern paddr_t pa_avail;
-     extern vaddr_t va_avail;
+     size_t offset;

      extern uint64_t __kernel_data_start;
      extern uint64_t __kernel_end;
-
-     size_t pa_offset = pa_avail - 0;
-     size_t va_offset = va_avail - CLUSTER_MIN_VA(0);

      memset(info, 0, sizeof(boot_info_t));

…

      init_bootinfo_core(&info->core[0]);

-     info->rsvd_nr = 0;
-     /* rsvd XXX */
+     info->rsvd_nr = init_bootinfo_rsvd(&info->rsvd);

      /* dev_ XXX */
-
-     info->pages_offset = MAX(pa_offset, va_offset);
+     offset = hal_gpt_bootstrap_uniformize();
+     info->pages_offset = offset / PAGE_SIZE;
      info->pages_nr = 0; /* XXX */
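init_bootinfo_rsvd() walks the memory map left by the bootloader in mb_info / mb_mmap and records each map entry as a page-aligned reserved range. In the Multiboot 1 map format, an entry's size field does not count the size field itself, which is why the cursor advances by mm_size + 4. Below is a self-contained sketch of the same walk; the struct layout and the two-entry sample map are illustrative, not the kernel's own types or data.

    /* Sketch: walking a Multiboot-1 style memory map and rounding each
     * region to whole pages, as init_bootinfo_rsvd() does. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define rounddown(x, n) ((x) & ~((uint64_t)(n) - 1))
    #define roundup(x, n)   (((x) + (n) - 1) & ~((uint64_t)(n) - 1))

    struct mb_mmap {
        uint32_t mm_size;       /* size of the rest of the entry, excludes this field */
        uint64_t mm_base_addr;
        uint64_t mm_length;
        uint32_t mm_type;
    } __attribute__((packed));

    int main(void)
    {
        /* Fabricated two-entry map: low memory and a ~1 GiB RAM region. */
        struct mb_mmap map[2] = {
            { 20, 0x00000000, 0x0009fc00, 1 },
            { 20, 0x00100000, 0x3fee0000, 1 },
        };
        uint8_t *mmap_addr = (uint8_t *)map;
        size_t mmap_length = sizeof(map);
        size_t i = 0;

        while (i < mmap_length) {
            struct mb_mmap *mm = (struct mb_mmap *)(mmap_addr + i);

            printf("first_page=%llu npages=%llu\n",
                   (unsigned long long)(rounddown(mm->mm_base_addr, PAGE_SIZE) / PAGE_SIZE),
                   (unsigned long long)(roundup(mm->mm_length, PAGE_SIZE) / PAGE_SIZE));

            /* mm_size does not count itself: advance by mm_size + 4 bytes. */
            i += mm->mm_size + 4;
        }
        return 0;
    }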
trunk/hal/x86_64/core/hal_internal.h (r99 → r116)

  vaddr_t hal_gpt_bootstrap_valloc(size_t npages);
  void hal_gpt_bootstrap_reset();
+ size_t hal_gpt_bootstrap_uniformize();
+
  void hal_gpt_enter(vaddr_t va, paddr_t pa, pt_entry_t flags);
  void hal_gpt_enter_range(vaddr_t va, paddr_t pa, size_t n);
trunk/hal/x86_64/core/hal_ppm.c (r110 → r116)

  #include <page.h>

+ #include <hal_boot.h>
  #include <hal_internal.h>

  error_t hal_ppm_init(boot_info_t *info)
  {
-     size_t i;
+     boot_rsvd_t *rsvd;
+     size_t i, j;

      // get relevant info from boot_info structure

…

      }

-     // TODO
+     // initialize dirty_list as empty
+     list_root_init( &ppm->dirty_root );

-     x86_panic((char *)__func__);
-     return 0;
+     // compute size of pages_tbl[] array rounded to an integer number of pages
+     uint32_t bytes = ARROUND_UP( pages_nr * sizeof(page_t), CONFIG_PPM_PAGE_SIZE );
+
+     // compute number of pages required to store page descriptor array
+     uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;
+
+     // compute total number of reserved pages (kernel code & pages_tbl[])
+     uint32_t reserved_pages = pages_tbl_offset + pages_tbl_nr;
+
+     // initialize pages_nr, pages_tbl, and vaddr_base pointers
+     ppm->pages_nr   = pages_nr;
+     ppm->vaddr_base = (void *)CLUSTER_MIN_VA(0);
+     ppm->pages_tbl  = (page_t *)hal_gpt_bootstrap_valloc(pages_tbl_nr);
+
+     // make sure we respect the rule [VA = PA + constant_offset]
+     XASSERT(ppm->pages_tbl == ppm->vaddr_base + pages_tbl_offset * PAGE_SIZE);
+
+     // initialize all page descriptors in pages_tbl[]
+     for( i = 0 ; i < pages_nr ; i++ )
+     {
+         page_init( &ppm->pages_tbl[i] );
+
+         // TODO optimisation for this enormous loop on small pages:
+         // make only a partial init with a memset, and complete the
+         // initialisation when page is allocated [AG]
+     }
+
+     /*
+      * Set the PG_RESERVED flag for reserved pages (kernel, pages_tbl[] and
+      * memory holes).
+      */
+     for (i = 0; i < reserved_pages; i++) {
+         page_set_flag(&ppm->pages_tbl[i], PG_RESERVED);
+     }
+     for (i = 0; i < rsvd_nr; i++) {
+         rsvd = &info->rsvd[i];
+         for (j = 0; j < rsvd->npages; j++) {
+             page_set_flag(&ppm->pages_tbl[rsvd->first_page + j], PG_RESERVED);
+         }
+     }
+
+     /* Release all other pages to populate the free lists */
+     for (i = reserved_pages; i < pages_nr; i++) {
+         if (!page_is_flag(&ppm->pages_tbl[i], PG_RESERVED))
+             ppm_free_pages_nolock(&ppm->pages_tbl[i]);
+     }
+
+     /* Check consistency */
+     return ppm_assert_order(ppm);
  }
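hal_ppm_init() now sizes the pages_tbl[] descriptor array, marks the kernel image, the descriptor array itself and the boot-time memory holes as PG_RESERVED, and releases every remaining page to the free lists. The sizing boils down to one round-up and one addition; the stand-alone sketch below repeats that arithmetic with made-up figures (descriptor size, RAM size and kernel footprint are assumptions, not values from this changeset).

    /* Sketch of the hal_ppm_init() sizing arithmetic: pages occupied by
     * pages_tbl[] and the resulting number of reserved pages. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE   4096
    #define PAGE_SHIFT  12
    #define ARROUND_UP(val, align) (((val) + (align) - 1) & ~((uint64_t)(align) - 1))

    int main(void)
    {
        uint64_t pages_nr         = 262144;  /* e.g. 1 GiB of RAM, 4 KiB pages     */
        uint64_t sizeof_page_t    = 64;      /* assumed page descriptor size       */
        uint64_t pages_tbl_offset = 2048;    /* pages already used by kernel image */

        /* size of pages_tbl[] rounded up to an integer number of pages */
        uint64_t bytes        = ARROUND_UP(pages_nr * sizeof_page_t, PAGE_SIZE);
        uint64_t pages_tbl_nr = bytes >> PAGE_SHIFT;

        /* kernel image + descriptor array are both marked PG_RESERVED */
        uint64_t reserved_pages = pages_tbl_offset + pages_tbl_nr;

        printf("pages_tbl_nr = %llu, reserved_pages = %llu, free = %llu\n",
               (unsigned long long)pages_tbl_nr,
               (unsigned long long)reserved_pages,
               (unsigned long long)(pages_nr - reserved_pages));
        return 0;
    }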