Changeset 106 for trunk/hal/tsar_mips32
- Timestamp: Jun 30, 2017, 9:16:22 AM (7 years ago)
- File: 1 edited
trunk/hal/tsar_mips32/core/hal_ppm.c (r62 → r106, shown below as of r106; unchanged regions elided with …)

/*
 * hal_ppm.c - Generic Physical Page Table API implementation for TSAR
 *
 * Authors  Alain Greiner (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
…

//////////////////////////////////////////////////////////////////////////////////////////
// For The TSAR architecture, the kernel pointers are identity mapped:
// - the 32 bits PTR value is identical to the 32 bits LPA value,
// - the 64 bits XPTR value is identical to the 64 bits PADDR value.
// The pages_tbl[] is mapped in first free page after kernel code.
…

error_t hal_ppm_init( boot_info_t * info )
{
    uint32_t i;

    // get relevant info from boot_info structure
    uint32_t pages_nr         = info->pages_nr;
    uint32_t pages_tbl_offset = info->pages_offset;
    uint32_t rsvd_nr          = info->rsvd_nr;

    // check no reserved zones other than kernel code for TSAR
    assert( (rsvd_nr == 0) , __FUNCTION__ , "NO reserved zones for TSAR\n" );

    // get pointer on local Physical Page Manager
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // initialize lock protecting the free_pages[] lists
    spinlock_init( &ppm->free_lock );

    // initialize lock protecting the dirty_pages list
    spinlock_init( &ppm->dirty_lock );

    // initialize all free_pages[] lists as empty
    for( i = 0 ; i < CONFIG_PPM_MAX_ORDER ; i++ )
    {
        …
    }

    // initialize dirty_list as empty
    list_root_init( &ppm->dirty_root );

    // initialize pages_nr, pages_tbl, and vaddr_base pointers
    // (TSAR uses identity mapping for kernel pointers)
    // x86 architectures should use vaddr_base = 0xFFFF8000000000 + (cxy << 36)
    ppm->pages_nr   = pages_nr;
    ppm->vaddr_base = NULL;
    ppm->pages_tbl  = (page_t*)( ppm->vaddr_base +
                                 (pages_tbl_offset << CONFIG_PPM_PAGE_SHIFT) );

    // compute size of pages_tbl[] array rounded to an integer number of pages
    uint32_t bytes = ARROUND_UP( pages_nr * sizeof(page_t), CONFIG_PPM_PAGE_SIZE );

    // compute number of pages required to store page descriptor array
    uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;

    // compute total number of reserved pages (kernel code & pages_tbl[])
    uint32_t reserved_pages = pages_tbl_offset + pages_tbl_nr;

    // initialize all page descriptors in pages_tbl[]
    for( i = 0 ; i < pages_nr ; i++ )
    {
        page_init( &ppm->pages_tbl[i] );

        // TODO optimisation for this enormous loop on small pages:
        // make only a partial init with a memset, and complete the
        // initialisation when page is allocated [AG]
    }

    // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
    // - release all other pages to populate the free lists
    for( i = 0 ; i < reserved_pages ; i++ )
    {
        page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
    }
    for( i = reserved_pages ; i < pages_nr ; i++ )
    {
        ppm_free_pages_nolock( &ppm->pages_tbl[i] );

        // TODO optimisation : decompose this enormous set of small pages
        // to several sets of big pages with various order values
    }

    // check consistency
    return ppm_assert_order( ppm );
}
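The identity-mapping convention stated in the header comments (the 32-bit PTR equals the 32-bit LPA, the 64-bit XPTR equals the 64-bit PADDR) can be pictured with a small stand-alone sketch. The helper and address layout below are illustrative assumptions, not the ALMOS-MKH API; the TSAR physical address is taken here to be the cluster identifier concatenated with the 32-bit local address.

// Illustration only: hypothetical helper, not part of ALMOS-MKH.
// Assumed TSAR-like layout: PADDR = (cxy << 32) | LPA, and kernel
// pointers are identity mapped, so the local pointer *is* the LPA.
#include <stdint.h>
#include <stdio.h>

static uint64_t make_xptr( uint32_t cxy , void * local_ptr )
{
    return ((uint64_t)cxy << 32) | (uint32_t)(uintptr_t)local_ptr;
}

int main( void )
{
    // e.g. pages_tbl[] mapped in the first free page after kernel code
    void   * pages_tbl = (void *)(uintptr_t)0x00200000;
    uint64_t xptr      = make_xptr( 0x13 , pages_tbl );   // assumed cluster identifier encoding

    printf( "local PTR = %p  ->  XPTR = 0x%llx\n" , pages_tbl , (unsigned long long)xptr );
    return 0;
}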
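The sizing arithmetic for pages_tbl[] (ARROUND_UP to a whole number of pages, then a shift to obtain a page count) can be checked with concrete numbers. Every value below is an assumption for illustration only: a 4 KB page, a 64-byte page descriptor, 128K physical pages in the cluster, and kernel code occupying the first 256 pages; ROUND_UP merely plays the role of ARROUND_UP.

#include <stdint.h>
#include <stdio.h>

// Illustrative stand-ins for the kernel configuration (assumed values).
#define PAGE_SIZE      4096u
#define PAGE_SHIFT     12u
#define ROUND_UP(x,m)  ( ((x) + (m) - 1) & ~((m) - 1) )   // same role as ARROUND_UP

int main( void )
{
    uint32_t pages_nr         = 0x20000;   // assumed: 128K pages = 512 MB of physical memory
    uint32_t pages_tbl_offset = 0x100;     // assumed: kernel code occupies the first 256 pages
    uint32_t page_desc_size   = 64;        // assumed sizeof(page_t)

    uint32_t bytes          = ROUND_UP( pages_nr * page_desc_size , PAGE_SIZE );  // 8 MB
    uint32_t pages_tbl_nr   = bytes >> PAGE_SHIFT;                                // 2048 pages
    uint32_t reserved_pages = pages_tbl_offset + pages_tbl_nr;                    // 2304 pages

    printf( "bytes = %u  pages_tbl_nr = %u  reserved_pages = %u\n" ,
            (unsigned)bytes , (unsigned)pages_tbl_nr , (unsigned)reserved_pages );
    return 0;
}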
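The TODO in the release loop points at handing the allocator large blocks instead of single pages. A generic sketch of that decomposition is given below, with hypothetical names and a printf standing in for the real release call: the free range is cut into maximal naturally aligned power-of-two blocks, which is what a buddy-style free list with per-order roots expects.

#include <stdint.h>
#include <stdio.h>

#define PPM_MAX_ORDER  16u   // assumed, mirrors CONFIG_PPM_MAX_ORDER

// Decompose the page range [first, first + count) into maximal
// naturally aligned power-of-two blocks.
static void release_range( uint32_t first , uint32_t count )
{
    while( count )
    {
        // largest order allowed by the alignment of <first>
        uint32_t order = (first == 0) ? (PPM_MAX_ORDER - 1) : (uint32_t)__builtin_ctz( first );
        if( order >= PPM_MAX_ORDER ) order = PPM_MAX_ORDER - 1;

        // shrink the block until it fits in the remaining count
        while( (1u << order) > count ) order--;

        // here the real code would release one block of 2^order pages
        printf( "release block: base = %u  order = %u\n" , (unsigned)first , (unsigned)order );

        first += (1u << order);
        count -= (1u << order);
    }
}

int main( void )
{
    release_range( 2304 , 0x20000 - 2304 );   // values taken from the sizing example above
    return 0;
}

Each printed (base, order) pair would correspond to a single release call replacing 2^order iterations of the current page-by-page loop.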