Changeset 686
- Timestamp: Jan 13, 2021, 12:47:53 AM
- Location: trunk/hal
- Files: 16 edited
Legend:
- "-" prefix : removed line
- "+" prefix : added line
- two-space indent : unchanged context
trunk/hal/generic/hal_gpt.h
(r640 → r686)

   * hal_gpt.h - Generic Page Table API definition.
   *
-  * Authors Alain Greiner (2016,2017,2018,2019)
+  * Authors Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
  // defined as a 32 bits-vector.
  //
- // Any arch-specific implementation must implement this API.
+ // Any architecture-specific implementation must implement this API.
  /////////////////////////////////////////////////////////////////////////////////////////
trunk/hal/generic/hal_special.h
(r679 → r686)

   * hal_special.h - Generic Special Registers Access API definition.
   *
-  * Authors Alain Greiner (2016,2017)
+  * Authors Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
   * This function initializes - for architectures requiring it - the MMU registers of the
   * calling core to use the kernel page table identified by the <gpt> argument for
-  * all threads attached to kernel process_zero.
+  * all kernel threads attached to kernel process_zero.
   * It is called by all cores in the kernel_init() function.
   *****************************************************************************************
trunk/hal/tsar_mips32/core/hal_context.c
(r679 → r686)

   * hal_context.c - implementation of Thread Context API for TSAR-MIPS32
   *
-  * Author Alain Greiner (2016,2017,2018,2019)
+  * Author Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
  {

- assert( __FUNCTION__, (sizeof(hal_cpu_context_t) <= CONFIG_CPU_CTX_SIZE), "illegal CPU context size" );
+ assert( __FUNCTION__, (sizeof(hal_cpu_context_t) <= CONFIG_CPU_CTX_SIZE),
+ "illegal CPU context size" );

      // allocate memory for cpu_context
-     kmem_req_t req;
-     req.type  = KMEM_KCM;
-     req.order = bits_log2( sizeof(hal_cpu_context_t) );
-     req.flags = AF_KERNEL | AF_ZERO;
-
-     hal_cpu_context_t * context = kmem_alloc( &req );
-
+     hal_cpu_context_t * context = kmem_alloc( bits_log2( sizeof(hal_cpu_context_t) ),
+                                               AF_KERNEL | AF_ZERO );
      if( context == NULL ) return -1;
...
      hal_cpu_context_t * context = (hal_cpu_context_t *)thread->cpu_context;

-     assert( __FUNCTION__, (context != NULL ), "CPU context not allocated" );
+     assert( __FUNCTION__, (context != NULL ),
+     "CPU context not allocated" );

      // compute the PPN for the GPT PT1
...
  void hal_cpu_context_destroy( thread_t * thread )
  {
-     kmem_req_t req;
-
      // get pointer on CPU context
      hal_cpu_context_t * ctx = thread->cpu_context;

      // release CPU context if required
-     if( ctx != NULL )
-     {
-         req.type = KMEM_KCM;
-         req.ptr  = ctx;
-         kmem_free( &req );
-     }
+     if( ctx != NULL ) kmem_free( ctx , bits_log2( sizeof(hal_cpu_context_t)) );

  }  // end hal_cpu_context_destroy()
...
      // allocate memory for fpu_context
-     kmem_req_t req;
-     req.type  = KMEM_KCM;
-     req.flags = AF_KERNEL | AF_ZERO;
-     req.order = bits_log2( sizeof(hal_fpu_context_t) );
-
-     hal_fpu_context_t * context = kmem_alloc( &req );
+     hal_fpu_context_t * context = kmem_alloc( bits_log2( sizeof(hal_fpu_context_t) ),
+                                               AF_KERNEL | AF_ZERO );

      if( context == NULL ) return -1;
...
      hal_fpu_context_t * context = thread->fpu_context;

-     assert( __FUNCTION__, (context != NULL) , "fpu context not allocated" );
+     assert( __FUNCTION__, (context != NULL) ,
+     "fpu context not allocated" );

      memset( context , 0 , sizeof(hal_fpu_context_t) );
...
  void hal_fpu_context_destroy( thread_t * thread )
  {
-     kmem_req_t req;
-
-     hal_fpu_context_t * context = thread->fpu_context;
+     // get pointer on FPU context
+     hal_fpu_context_t * ctx = thread->fpu_context;

      // release FPU context if required
-     if( context != NULL )
-     {
-         req.type = KMEM_KCM;
-         req.ptr  = context;
-         kmem_free( &req );
-     }
+     if( ctx != NULL ) kmem_free( ctx , bits_log2( sizeof(hal_fpu_context_t)) );

  }  // end hal_fpu_context_destroy()
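Most of this changeset mechanically applies one kernel allocator API change: the kmem_req_t request descriptor disappears, and kmem_alloc() / kmem_free() take the size order and the flags directly. A minimal before/after sketch, using the names visible in the diff (kmem_req_t, KMEM_KCM, AF_KERNEL, AF_ZERO, bits_log2()); the my_ctx_t type is hypothetical, only there to make the snippet complete:

    typedef struct { uint32_t state; } my_ctx_t;   // hypothetical payload type

    // Old style (up to r679) : fill a request descriptor, pass its address.
    kmem_req_t req;
    req.type  = KMEM_KCM;                          // kernel cache allocator
    req.order = bits_log2( sizeof(my_ctx_t) );     // log2 of requested size
    req.flags = AF_KERNEL | AF_ZERO;               // kernel memory, zero-filled
    my_ctx_t * old_ctx = kmem_alloc( &req );

    // New style (r686) : order and flags are passed directly, and the same
    // order must be given back to kmem_free(), which now expects the caller
    // to remember the allocation size.
    my_ctx_t * new_ctx = kmem_alloc( bits_log2( sizeof(my_ctx_t) ) , AF_KERNEL | AF_ZERO );
    if( new_ctx != NULL ) kmem_free( new_ctx , bits_log2( sizeof(my_ctx_t) ) );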
trunk/hal/tsar_mips32/core/hal_drivers.c
(r679 → r686)

   * hal_drivers.c - Driver initializers for TSAR
   *
-  * Copyright (c) 2017 Maxime Villard
+  * Author Maxime Villard (2017)
+  *
+  * Copyright (c) UPMC Sorbonne Universites
   *
   * This file is part of ALMOS-MKH.
trunk/hal/tsar_mips32/core/hal_exception.c
(r635 → r686)

      // try to map the unmapped PTE
      error = vmm_handle_page_fault( process,
-                                    bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
+                                    bad_vaddr >> CONFIG_PPM_PAGE_ORDER );

      if( error == EXCP_NON_FATAL )          // page-fault successfully handled
...
      // try to handle a possible COW
      error = vmm_handle_cow( process,
-                             bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
+                             bad_vaddr >> CONFIG_PPM_PAGE_ORDER );

      if( error == EXCP_NON_FATAL )          // COW successfully handled
...
      remote_busylock_acquire( lock_xp );

-     nolock_printk("\n=== thread(%x,%x) / core[%d] / cycle %d ===\n",
-     process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
+     nolock_printk("\n=== thread(%x,%x) / core[%x,%d] / cycle %d ===\n",
+     process->pid, this->trdid, process->pid, core->lid, (uint32_t)hal_get_cycles() );

      nolock_printk("busylocks = %d / blocked_vector = %X / flags = %X\n\n",
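The CONFIG_PPM_PAGE_SHIFT to CONFIG_PPM_PAGE_ORDER renaming that runs through the rest of this changeset is a pure rename: both names denote log2 of the page size, so every shift keeps the same semantics. A small self-contained illustration, assuming the 4 Kbytes page size asserted in hal_gpt.c below:

    #include <stdint.h>

    #define CONFIG_PPM_PAGE_ORDER  12                             // log2(4096)
    #define CONFIG_PPM_PAGE_SIZE   (1u << CONFIG_PPM_PAGE_ORDER)  // 4096 bytes
    #define CONFIG_PPM_PAGE_MASK   (CONFIG_PPM_PAGE_SIZE - 1)     // in-page offset mask

    // A faulting virtual address becomes a virtual page number (VPN) by
    // dropping the in-page offset, as in the vmm_handle_page_fault() call above.
    static inline uint32_t vaddr_to_vpn( uint32_t bad_vaddr )
    {
        return bad_vaddr >> CONFIG_PPM_PAGE_ORDER;
    }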
trunk/hal/tsar_mips32/core/hal_gpt.c
(r679 → r686)

   * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
   *
-  * Author Alain Greiner (2016,2017,2018,2019)
+  * Author Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
  ////////////////////////////////////////////////////////////////////////////////////////
+ // The Page Table for the TSAR-MIPS32 MMU is defined as a two-level radix tree.
+ //
+ // It defines two page sizes : 4 Kbytes pages, and 2 Mbytes pages.
+ // The virtual address space size is 4 Gbytes (32 bits virtual addresses).
+ // The physical address space is limited to 1 Tbytes (40 bits physical addresses).
+ // - For a 4 Kbytes page, the VPN uses 20 bits, and the PPN requires 28 bits.
+ // - For a 2 Mbytes page, the VPN uses 11 bits, and the PPN requires 19 bits.
+ //
+ // The first level array (PT1) contains 2048 entries, each entry contains 4 bytes,
+ // and this array is aligned on an 8K bytes boundary.
+ //
+ // The second level array (PT2) contains 512 entries, each entry contains 8 bytes,
+ // and this array is aligned on a 4K bytes boundary.
+ ////////////////////////////////////////////////////////////////////////////////////////
+
+ ////////////////////////////////////////////////////////////////////////////////////////
  // This defines the masks for the TSAR MMU PTE attributes (from TSAR MMU specification)
  ////////////////////////////////////////////////////////////////////////////////////////
...
      // check page size
-     assert( __FUNCTION__, (CONFIG_PPM_PAGE_SIZE == 4096) , "the TSAR page size must be 4 Kbytes\n" );
-
-     // allocates 2 physical pages for PT1
-     kmem_req_t req;
-     req.type  = KMEM_PPM;
-     req.order = 1;                       // 2 small pages
-     req.flags = AF_KERNEL | AF_ZERO;
-     base = kmem_alloc( &req );
+     assert( __FUNCTION__, (CONFIG_PPM_PAGE_SIZE == 4096) ,
+     "the TSAR page size must be 4 Kbytes\n" );
+
+     // allocates 8 Kbytes for PT1
+     base = kmem_alloc( 13 , AF_ZERO );

      if( base == NULL )
...
      uint32_t * pt2;
      uint32_t   attr;
-     kmem_req_t req;

      thread_t * this = CURRENT_THREAD;
...
              }

-             // release the page allocated for the PT2
-             req.type = KMEM_PPM;
-             req.ptr  = pt2;
-             kmem_free( &req );
+             // release the 4K bytes allocated for the PT2
+             kmem_free( pt2 , 12 );
          }
      }
  }

-     // release the PT1
-     req.type = KMEM_PPM;
-     req.ptr  = pt1;
-     kmem_free( &req );
+     // release the 8K bytes allocated for PT1
+     kmem_free( pt1 , 13 );

  #if DEBUG_HAL_GPT_DESTROY
...
      xptr_t     pte1_xp;    // extended pointer on PT1[x1] entry
      uint32_t   pte1;       // value of PT1[x1] entry
-
-     kmem_req_t req;        // kmem request for PT2 allocation
-
      uint32_t * pt2;        // local pointer on PT2 base
      ppn_t      pt2_ppn;    // PPN of page containing PT2
...
      hal_disable_irq( &sr_save );

-     req.type  = KMEM_PPM;
-     req.order = 0;
-     req.flags = AF_ZERO | AF_KERNEL;
-     pt2 = kmem_remote_alloc( gpt_cxy , &req );
+     // allocate a 4K bytes PT2
+     pt2 = kmem_remote_alloc( gpt_cxy , 12 , AF_ZERO );

      if( pt2 == NULL )
...
      uint32_t * dst_pt2;    // local pointer on DST PT2

-     kmem_req_t req;        // for PT2 allocation
-
      uint32_t   src_pte1;
      uint32_t   dst_pte1;
...
      if( (dst_pte1 & TSAR_PTE_MAPPED) == 0 )
      {
-         // allocate one physical page for a new PT2
-         req.type  = KMEM_PPM;
-         req.order = 0;                      // 1 small page
-         req.flags = AF_KERNEL | AF_ZERO;
-         dst_pt2 = kmem_alloc( &req );
+         // allocate one 4K bytes physical page for a new PT2
+         dst_pt2 = kmem_alloc( 12 , AF_ZERO );

          if( dst_pt2 == NULL )
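The comment block added above fully determines how a 32-bit virtual address is decomposed: the 2048-entry PT1 is indexed by the 11 most-significant bits, the 512-entry PT2 by the next 9 bits, and the remaining 12 bits address a byte inside a 4 Kbytes page; a 2 Mbytes big page simply stops the translation after PT1. A hedged sketch of that arithmetic (these helper macros are illustrative, not the ones defined in the TSAR headers):

    #include <stdint.h>

    // TSAR-MIPS32 two-level page table decomposition (illustrative macros).
    #define IX1_FROM_VADDR( va )     (((va) >> 21) & 0x7FF)  // bits 31..21 : PT1 index (2048 entries)
    #define IX2_FROM_VADDR( va )     (((va) >> 12) & 0x1FF)  // bits 20..12 : PT2 index (512 entries)
    #define OFFSET_FROM_VADDR( va )  ((va) & 0xFFF)          // bits 11..0  : offset in 4 Kbytes page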
trunk/hal/tsar_mips32/core/hal_ppm.c
(r632 → r686)

      // compute number of pages required to store page descriptor array
-     uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;
+     uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_ORDER;

      // compute total number of reserved pages (kernel code & pages_tbl[])
...
      ppm->vaddr_base = NULL;
      ppm->pages_tbl  = (page_t*)( ppm->vaddr_base +
-                       (pages_tbl_offset << CONFIG_PPM_PAGE_SHIFT) );
+                       (pages_tbl_offset << CONFIG_PPM_PAGE_ORDER) );

      // initialize all page descriptors in pages_tbl[]
trunk/hal/tsar_mips32/core/hal_special.c
(r658 → r686)

   * hal_special.c - implementation of Generic Special Register Access API for TSAR-MIPS32
   *
-  * Author Alain Greiner (2016,2017)
+  * Author Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
  // For the TSAR architecture, this function registers the physical address of
  // the first level page table (PT1) in the PTPR register.
- // It activates the instruction MMU, and de-activates the data MMU.
+ // It activates the instruction MMU, and de-activates the data MMU, which is NOT
+ // used by the kernel for 32 bits architectures.
  /////////////////////////////////////////////////////////////////////////////////
  void hal_mmu_init( gpt_t * gpt )
trunk/hal/tsar_mips32/core/hal_uspace.c
(r679 → r686)

      uint32_t  cxy = (uint32_t)GET_CXY( k_dst_xp );

-     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
+     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
+     "must be called by an user thread" );

  #if DEBUG_HAL_USPACE
...
      uint32_t  cxy = (uint32_t)GET_CXY( k_src_xp );

-     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
+     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
+     "must be called by an user thread" );

  #if DEBUG_HAL_USPACE
...
      uint32_t  cxy = (uint32_t)GET_CXY( k_dst_xp );

-     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
+     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
+     "must be called by an user thread" );

      hal_disable_irq( &save_sr );
...
      uint32_t  cxy = (uint32_t)GET_CXY( k_src_xp );

-     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
+     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
+     "must be called by an user thread" );

      hal_disable_irq( &save_sr );
...
      uint32_t  str = (uint32_t)u_str;

-     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
+     assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
+     "must be called by an user thread" );

      hal_disable_irq( &save_sr );
...
      "mfc2   $15,  $1         \n"   /* $15 <= MMU_MODE (DTLB off)    */
      "ori    $14,  $15,  0x4  \n"   /* $14 <= mode DTLB on           */
+     "mtc2   $14,  $1         \n"   /* set DTLB on                   */
      "1:                      \n"
-     "mtc2   $14,  $1         \n"   /* set DTLB on                   */
      "lb     $12,  0($13)     \n"   /* $12 <= one byte from u_space  */
+     "beq    $12,  $0,   2f   \n"   /* exit loop when NUL found      */
+     "addi   $13,  $13,  1    \n"   /* increment address             */
+     "j      1b               \n"   /* jump to next iteration        */
+     "addi   %0,   %0,   1    \n"   /* increment count if not NUL    */
+     "2:                      \n"
      "mtc2   $15,  $1         \n"   /* set DTLB off                  */
-     "addi   $13,  $13,  1    \n"   /* increment address             */
-     "bne    $12,  $0,   1b   \n"   /* loop until NUL found          */
-     "addi   %0,   %0,   1    \n"   /* increment count               */
      ".set reorder            \n"
      : "+r"(count)
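The rewritten loop only makes sense with the MIPS branch delay slot in mind: under .set noreorder, the instruction placed right after a branch or jump executes whether or not the branch is taken. The address increment after the beq therefore runs on every iteration, while the count increment after the j runs only when the byte is not NUL; the old sequence also toggled the DTLB on every iteration and, because the bne delay slot executes even on fall-through, counted the terminating NUL. A C equivalent of the new loop (function name hypothetical):

    #include <stdint.h>

    // C equivalent of the rewritten hal_strlen_from_uspace() loop : str++
    // mirrors the "addi $13,$13,1" in the beq delay slot, count++ mirrors
    // the "addi %0,%0,1" in the j delay slot.
    static uint32_t strlen_sketch( const char * str )
    {
        uint32_t count = 0;
        // dtlb_on();             // done once, before the loop, in the new code
        while( *str != '\0' )
        {
            str++;
            count++;
        }
        // dtlb_off();            // done once, after the loop
        return count;
    }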
trunk/hal/tsar_mips32/drivers/soclib_nic.c
(r679 → r686)

  /*
-  * soclib_nic.c - SOCLIB_NIC (Network Interface Controller) driver implementation.
+  * soclib_nic.c - VCI_MASTER_NIC (Network Interface Controller) driver implementation.
   *
   * Author Alain Greiner (2016,2017,2018,2019,2020)
...
      remote_busylock_acquire( lock_xp );

-     nolock_printk("\n***** chbuf %s : ptr %x / wid %d / rid %d *****\n",
-     name, chbuf, chbuf->wid, chbuf->rid );
-
-     for( i = 0 ; i < SOCLIB_NIC_CHBUF_DEPTH ; i++ )
+     nolock_printk("\n***** chbuf %s : cxy %x / ptr %x / wid %d / rid %d *****\n",
+     name, local_cxy , chbuf, chbuf->wid, chbuf->rid );
+
+     for( i = 0 ; i < CONFIG_SOCK_QUEUES_DEPTH ; i++ )
      {
          uint32_t * container = chbuf->cont_ptr[i];
...
          if( container[511] )
          {
-             nolock_printk(" - %d : FULL / cont_ptr %x / cont_pad [%x,%x] / plen %d\n",
-             i, chbuf->cont_ptr[i],
-             (uint32_t)(chbuf->cont_pad[i]>>32),
-             (uint32_t)chbuf->cont_pad[i],
-             container[510] );
+             nolock_printk(" - %d : FULL / cont_ptr %x / plen %d\n",
+             i, chbuf->cont_ptr[i], container[510] );
          }
          else
          {
-             nolock_printk(" - %d : EMPTY / cont_ptr %x / cont_pad [%x,%x]\n",
-             i, chbuf->cont_ptr[i],
-             (uint32_t)(chbuf->cont_pad[i]>>32),
-             (uint32_t)chbuf->cont_pad[i] );
+             nolock_printk(" - %d : EMPTY / cont_ptr %x\n",
+             i, chbuf->cont_ptr[i] );
          }
      }
...
  void soclib_nic_init( chdev_t * chdev )
  {
-     uint32_t   i;
-     kmem_req_t req;
-     ppn_t      ppn;
-     uint64_t   padr;
+     uint32_t * container;      // local pointer on one container
+     uint32_t   cont_per_page;  // number of containers per page
+     uint32_t   cont_gid;       // container global index (in chbuf)
+     bool_t     cont_error;     // not enough memory for chbuf containers
+
+     ppn_t      ppn;            // used for both the chbuf descriptor and the containers
+     uint64_t   padr;           // used for both the chbuf descriptor and the containers
+
+     assert( __FUNCTION__ , (chdev->func == DEV_FUNC_NIC),
+     "bad func argument" );
+
+     assert( __FUNCTION__ , (sizeof(nic_cont_t) == 2048),
+     "container size must be 2048 bytes" );
+
+     assert( __FUNCTION__ , (CONFIG_PPM_PAGE_ORDER >= 11 ),
+     "page size cannot be smaller than container size" );

      // set driver specific fields in chdev descriptor
...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( (is_rx == false) && DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] thread[%x,%x] enter : NIC_TX channel %d / chdev %x / base %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, channel, chdev, nic_ptr, cycle );
+ printk("\n[%s] thread[%x,%x] enter : NIC_TX channel %d / chdev %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, channel, chdev, cycle );
  if( is_rx && DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] thread[%x,%x] enter : NIC_RX channel %d / chdev %x / base %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, channel, chdev, nic_ptr, cycle );
+ printk("\n[%s] thread[%x,%x] enter : NIC_RX channel %d / chdev %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, channel, chdev, cycle );
  #endif

-     // get number of channels from hardware
+     // get number of channels from NIC hardware register
      uint32_t channels = hal_remote_l32( XPTR( nic_cxy,
                          nic_ptr + NIC_GLOBAL_OFFSET + NIC_G_CHANNELS ));
...
      if( channel >= channels )
      {
-         printk("\n[PANIC] in %s illegal channel index\n", __FUNCTION__ );
+         printk("\n[ERROR] in %s illegal channel index\n", __FUNCTION__ );
          return;
      }

      // allocate memory for chbuf descriptor
-     req.type  = KMEM_KCM;
-     req.order = bits_log2( sizeof(nic_chbuf_t) );
-     req.flags = AF_KERNEL;
-     nic_chbuf_t * chbuf = kmem_alloc( &req );
+     nic_chbuf_t * chbuf = kmem_alloc( bits_log2( sizeof(nic_chbuf_t) ) , AF_KERNEL );

      if( chbuf == NULL )
      {
-         printk("\n[PANIC] in %s : cannot allocate chbuf descriptor\n", __FUNCTION__ );
+         printk("\n[ERROR] in %s : cannot allocate chbuf descriptor\n", __FUNCTION__ );
          return;
      }
...
      // software L2/L3 cache coherence for chbuf WID & RID
      if( chdev_dir.iob ) dev_mmc_sync( XPTR( local_cxy , chbuf ) , 8 );

-     // allocate containers and complete chbuf initialisation
-     for( i = 0 ; i < SOCLIB_NIC_CHBUF_DEPTH ; i++ )
-     {
-         // 2048 bytes per container
-         req.type  = KMEM_KCM;
-         req.order = 11;
-         req.flags = AF_KERNEL;
-         uint32_t * container = kmem_alloc( &req );
-
-         if( container == NULL )
-         {
-             printk("\n[PANIC] in %s : cannot allocate container\n", __FUNCTION__ );
-             return;
+     cont_error    = false;
+     cont_gid      = 0;
+     cont_per_page = 1 << (CONFIG_PPM_PAGE_ORDER - 11);
+
+     // allocate containers & complete chbuf initialisation
+     // depending on the PPM page size, we pack several
+     // 2048 bytes containers in one single page.
+
+     // loop on containers
+     while( cont_gid < CONFIG_SOCK_QUEUES_DEPTH )
+     {
+         if( (cont_gid & (cont_per_page - 1)) == 0 )   // allocate one PPM page
+         {
+             container = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_KERNEL );
+
+             if( container == NULL )
+             {
+                 cont_error = true;
+                 break;
+             }
+         }
+         else                                          // increment container base address
+         {
+             container = container + 512;
          }
...
          // compute container physical address
          ppn  = ppm_base2ppn( XPTR( local_cxy , container ) );
-         padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_SHIFT) |
+         padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_ORDER) |
                 ((intptr_t)container & CONFIG_PPM_PAGE_MASK);

          // complete chbuf initialisation
-         chbuf->cont_ptr[i] = container;
-         chbuf->cont_pad[i] = padr;
+         chbuf->cont_ptr[cont_gid] = container;
+         chbuf->cont_pad[cont_gid] = padr;
+
+         // increment container index
+         cont_gid++;
+     }
+
+     // release allocated containers and chbuf if not enough memory
+     if( cont_error )
+     {
+         // loop on allocated containers
+         while( cont_gid )
+         {
+             // release container when required
+             if( (cont_gid & (cont_per_page - 1)) == 0 )
+                 kmem_free( chbuf->cont_ptr[cont_gid] , CONFIG_PPM_PAGE_ORDER );
+
+             // decrement container index
+             cont_gid--;
+         }
+
+         // release chbuf descriptor
+         kmem_free( chbuf , bits_log2(sizeof(nic_chbuf_t)) );
+
+         return;
      }
...
      // get NIC channel segment base and chbuf depth
      uint32_t * channel_base = nic_ptr + NIC_CHANNEL_SPAN * channel;
-     uint32_t   nbufs        = SOCLIB_NIC_CHBUF_DEPTH;
+     uint32_t   nbufs        = CONFIG_SOCK_QUEUES_DEPTH;

      // compute chbuf physical address
      ppn  = ppm_base2ppn( XPTR( local_cxy , chbuf ) );
-     padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_SHIFT) |
+     padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_ORDER) |
             ((intptr_t)chbuf & CONFIG_PPM_PAGE_MASK);
...
      thread_t * this = CURRENT_THREAD;

-     // check calling thread == client thread
-     assert( __FUNCTION__, (thread_xp == XPTR( local_cxy , this )), "calling thread must be the client thread");
+     assert( __FUNCTION__, (thread_xp == XPTR( local_cxy , this )),
+     "calling thread must be the client thread");

      // get command type
...
      // check chdev is local
-     assert( __FUNCTION__, (dev_cxy == local_cxy), "illegal cluster for a WRITE command");
+     assert( __FUNCTION__, (dev_cxy == local_cxy),
+     "illegal cluster for a WRITE command");

      // get command arguments
...
      // check packet length
-     assert( __FUNCTION__, (length <= 2040), "packet length too large");
+     assert( __FUNCTION__, (length <= 2040),
+     "packet length too large");

      // get chbuf descriptor pointer
...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_TX < cycle )
- printk("\n[%s] thread[%x,%x] enter / WRITE / chdev %x / chbuf %x / len %d / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, dev_ptr, chbuf, length, cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
+ printk("\n[%s] thread[%x,%x] enter / WRITE / %s / chbuf (%x,%x) / len %d / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, dev_ptr->name, local_cxy, chbuf, length, cycle );
  #endif
      // check container STS
...
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_TX < cycle )
- printk("\n[%s] thread[%x,%x] WRITE failure : NIC_TX[%d] queue full / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / WRITE failure : NIC_TX[%d] queue full / cycle %d\n",
  __FUNCTION__, this->process->pid , this->trdid , dev_ptr->channel , cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
  #endif
      }
...
          // update current container WID
-         chbuf->wid = (index + 1) % SOCLIB_NIC_CHBUF_DEPTH;
+         chbuf->wid = (index + 1) % CONFIG_SOCK_QUEUES_DEPTH;

          // software L2/L3 cache coherence for container DATA write
...
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_TX < cycle )
- printk("\n[%s] thread[%x,%x] WRITE success on NIC_TX[%d] / len %d / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / WRITE success on NIC_TX[%d] / len %d / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, dev_ptr->channel , length, cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
+ if((DEBUG_HAL_NIC_TX < cycle) && (DEBUG_HAL_NIC_TX & 1))
+ putb( "64 first bytes moved to TX queue by NIC driver" , buffer , 64 );
  #endif
      }
...
      // check chdev is local
-     assert( __FUNCTION__, (dev_cxy == local_cxy), "illegal cluster for a READ command");
+     assert( __FUNCTION__, (dev_cxy == local_cxy),
+     "illegal cluster for a READ command");

      // get target buffer
...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] thread[%x,%x] enter / READ / chdev %x / chbuf %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, dev_ptr, chbuf, cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
+ printk("\n[%s] thread[%x,%x] enter / READ / %s / chbuf (%x,%x) / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, dev_ptr->name, local_cxy, chbuf, cycle );
  #endif
      // check container state
...
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] thread[%x,%x] READ failure : NIC_RX[%d] queue empty / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / READ failure : NIC_RX[%d] queue empty / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, dev_ptr->channel , cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
  #endif
      }
...
          // update current container RID
-         chbuf->rid = (index + 1) % SOCLIB_NIC_CHBUF_DEPTH;
+         chbuf->rid = (index + 1) % CONFIG_SOCK_QUEUES_DEPTH;

          // software L2/L3 cache coherence for container STS write
...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] thread[%x,%x] READ success on NIC_RX[%d] queue / len %d / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / READ success on NIC_RX[%d] queue / len %d / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid , dev_ptr->channel , length , cycle );
- soclib_nic_chbuf_display( chbuf , dev_ptr->name );
+ if((DEBUG_HAL_NIC_RX < cycle) && (DEBUG_HAL_NIC_RX & 1))
+ putb("64 first bytes moved from RX queue by NIC driver" , buffer , 64 );
  #endif
      }
...
      cxy_t    base_cxy = GET_CXY( base_xp );

-     // get channel and run from the "length" and "status" arguments
+     // get "channel" and "run" arguments from the "length" and "status" arguments
      uint32_t channel = this->nic_cmd.length;
      uint32_t run     = this->nic_cmd.status;
...
  void __attribute__ ((noinline)) soclib_nic_isr( chdev_t * chdev )
  {
-     // get base, size, channel, is_rx from NIC channel device NIC
+     // get base, size, channel, is_rx, name, and server from NIC chdev
      xptr_t     base_xp = chdev->base;
      uint32_t   channel = chdev->channel;
      bool_t     is_rx   = chdev->is_rx;
+     thread_t * server  = chdev->server;

      // get NIC peripheral cluster and local pointer
...
      uint32_t status = hal_remote_l32( XPTR( nic_cxy , ptr ) );

      // check status value
-     if( is_rx && (status != NIC_CHANNEL_STATUS_IDLE) )
-     printk("\n[PANIC] in %s : error reported by NIC_RX[%d]\n", __FUNCTION__, channel );
-     if( (is_rx == false) && (status != NIC_CHANNEL_STATUS_IDLE) )
-     printk("\n[PANIC] in %s : error reported by NIC_TX[%d]\n", __FUNCTION__, channel );
-
-     // unblock server thread
-     thread_t * server = chdev->server;
-     thread_unblock( XPTR( local_cxy , server ) , THREAD_BLOCKED_ISR );
-
- #if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
- uint32_t cycle = (uint32_t)hal_get_cycles();
- if( is_rx && DEBUG_HAL_NIC_RX < cycle )
- printk("\n[%s] ISR unblocks NIC_RX[%d] server thread / cycle %d\n",
- __FUNCTION__, channel, cycle );
- if( (is_rx == false) && DEBUG_HAL_NIC_TX < cycle )
- printk("\n[%s] ISR unblocks NIC_TX[%d] server thread / cycle %d\n",
- __FUNCTION__, channel, cycle );
- #endif
+     if( status == NIC_CHANNEL_STATUS_ERROR )        // error reported
+     {
+
+ #if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ printk("\n[%s] error reported for %s / status %d / cycle %d\n",
+ __FUNCTION__ , chdev->name , status , cycle );
+ #endif
+         server->nic_cmd.error = 1;
+     }
+     else if( status != NIC_CHANNEL_STATUS_IDLE )    // no error but DMA BUSY
+     {
+
+ #if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ printk("\n[%s] warning reported for %s / status %d / cycle %d\n",
+ __FUNCTION__ , chdev->name , status , cycle );
+ #endif
+         server->nic_cmd.error = 0;
+     }
+     else
+     {
+
+ #if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ printk("\n[%s] irq reported for %s / status %d / cycle %d\n",
+ __FUNCTION__ , chdev->name , status , cycle );
+ #endif
+         server->nic_cmd.error = 0;
+     }
+
+     // unblock server thread
+     server->nic_cmd.status = status;
+     thread_unblock( XPTR( local_cxy , server ) , THREAD_BLOCKED_ISR );

  }  // end soclib_nic_isr()
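The new allocation scheme in soclib_nic_init() packs several 2048 bytes containers into each physical page instead of requesting one KCM block per container. A sketch of the packing arithmetic used by the loop above, assuming 4 Kbytes pages and a queue depth of 8 (both values are assumptions, not taken from kernel_config.h):

    #include <stdint.h>

    #define CONFIG_PPM_PAGE_ORDER     12   // assumed : 4 Kbytes pages
    #define CONFIG_SOCK_QUEUES_DEPTH   8   // assumed queue depth

    // With 2048 bytes (order 11) containers, cont_per_page == 2 : container
    // indices that are multiples of cont_per_page trigger a fresh page
    // allocation; the others reuse the current page, 512 uint32_t words
    // (2048 bytes) beyond the previous container.
    static inline int needs_new_page( uint32_t cont_gid )
    {
        uint32_t cont_per_page = 1u << (CONFIG_PPM_PAGE_ORDER - 11);
        return (cont_gid & (cont_per_page - 1)) == 0;
    }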
trunk/hal/tsar_mips32/drivers/soclib_nic.h
(r658 → r686)

  #include <chdev.h>
+ #include <kernel_config.h>
  #include <hal_kernel_types.h>
...
   * in two memory mapped software FIFOs, called NIC_TX_QUEUE and NIC_RX_QUEUE, implemented
   * as chained buffers (chbuf). Each slot in these FIFOs is a container, containing one
-  * single packet. The number of containers, defining the queue depth, is a software defined
-  * parameter. The data transfer unit between is a container (one single packet).
-  *
-  * - The "container" structure contains a 2040 bytes data buffer, the packet length, and
-  *   the container state: full (owned by the reader) / empty (owned by the writer).
+  * single packet. The number of containers, defining the queue depth, is defined by the
+  * CONFIG_SOCK_QUEUES_DEPTH parameter. The data transfer unit is one container (one
+  * single packet).
+  *
+  * - One container contains a 2040 bytes data buffer, the packet length (4 bytes), and the
+  *   container state (4 bytes) : full (owned by the reader) / empty (owned by the writer).
   *   For each container, the state variable is used as a SET/RESET flip-flop to synchronize
   *   the software server thread, and the hardware NIC DMA engines.
...
  /********************************************************************************************
-  * This structure defines the chbuf descriptor, used to implement both the RX and TX packets
+  * This structure defines the soclib_nic chbuf descriptor, used to implement the RX and TX
   * queues. Each container contains one single packet, and has only two states (full/empty).
   * All containers are allocated in the same cluster as the associated NIC chdev descriptor.
...
   *******************************************************************************************/

- #define SOCLIB_NIC_CHBUF_DEPTH  8
-
  typedef struct nic_chbuf_s
  {
-     uint32_t   wid;                                /*! current container write index      */
-     uint32_t   rid;                                /*! current container read index       */
-     uint64_t   cont_pad[SOCLIB_NIC_CHBUF_DEPTH];   /*! containers physical base addresses */
-     uint32_t * cont_ptr[SOCLIB_NIC_CHBUF_DEPTH];   /*! containers virtual base addresses  */
+     uint32_t   wid;                                  /*! current container write index      */
+     uint32_t   rid;                                  /*! current container read index       */
+     uint64_t   cont_pad[CONFIG_SOCK_QUEUES_DEPTH];   /*! containers physical base addresses */
+     uint32_t * cont_ptr[CONFIG_SOCK_QUEUES_DEPTH];   /*! containers virtual base addresses  */
  }
  nic_chbuf_t;

  /********************************************************************************************
-  * This structure defines the container descriptor format.
+  * This structure defines the soclib_nic container descriptor format.
+  * One container occupies exactly 2048 bytes.
   *******************************************************************************************/
...
  /********************************************************************************************
-  * This ISR is executed when a new RX container has been moved to an empty TX queue,
-  * or when a TX container has been removed from a full TX queue. In both cases, it
-  * reactivates the corresponding server thread from the BLOCKED_ISR condition.
-  * It is also executed in case of error reported by the DMA engines accessing the TX or RX
-  * queues. It simply prints an error message on the kernel terminal.
-  * TODO improve this error handling...
+  * This ISR is executed in four cases :
+  * - when an RX container has been moved to an empty RX queue by the RX DMA engine,
+  * - when a TX container has been removed from a full TX queue by the TX DMA engine,
+  * - when an error is reported by the RX DMA engine accessing the RX queue,
+  * - when an error is reported by the TX DMA engine accessing the TX queue.
+  * In all cases it simply reactivates the corresponding TX or RX server thread,
+  * and signals the event type by writing the relevant value in the command "error" field.
   ********************************************************************************************
   * @ chdev   : local pointer on NIC chdev descriptor.
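The container layout implied by the driver code (payload first, packet length in word 510, status flip-flop in word 511, as read by soclib_nic_cmd() and soclib_nic_chbuf_display()) can be written as a struct. This is a sketch inferred from those accesses, not copied from the header:

    #include <stdint.h>

    // One NIC container : exactly 2048 bytes, as asserted in soclib_nic_init().
    typedef struct nic_cont_sketch_s
    {
        uint8_t  data[2040];   // packet payload (at most 2040 bytes)
        uint32_t plen;         // word 510 : packet length in bytes
        uint32_t sts;          // word 511 : 0 = EMPTY (writer owns) / else FULL (reader owns)
    } nic_cont_sketch_t;

    _Static_assert( sizeof(nic_cont_sketch_t) == 2048 , "container must be 2048 bytes" );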
trunk/hal/tsar_mips32/drivers/soclib_pic.c
(r679 → r686)

   * soclib_pic.c - soclib PIC driver implementation.
   *
-  * Author Alain Greiner (2016,2017,2018,2019)
+  * Author Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
  //////////////////////////////////////////////////////////////////////////////////////

- extern chdev_directory_t  chdev_dir;
+ extern chdev_directory_t  chdev_dir;      // defined in chdev.h / allocated in kernel_init.c

  extern iopic_input_t  iopic_input;        // defined in dev_pic.h / allocated in kernel_init.c
...
      soclib_pic_cluster_t * ext_ptr = LOCAL_CLUSTER->pic_extend;

-     assert( __FUNCTION__, (ext_ptr->first_free_wti < ext_ptr->wti_nr) , "no free WTI found : too much external IRQs\n");
+     assert( __FUNCTION__, (ext_ptr->first_free_wti < ext_ptr->wti_nr) ,
+     "no free WTI found : too much external IRQs");

      // update WTI allocator
...
      if( index < LOCAL_CLUSTER->cores_nr )            // it is an IPI
      {
-         assert( __FUNCTION__, (index == core->lid) , "illegal IPI index" );
+
+         assert( __FUNCTION__, (index == core->lid),
+         "illegal IPI index" );

  #if DEBUG_HAL_IRQS
...
      {
          printk("\n[WARNING] in %s : no handler for WTI %d on core %d in cluster %x\n",
-                __FUNCTION__ , index , core->lid , local_cxy );
-
-         core->spurious_irqs ++;
+         __FUNCTION__ , index , core->lid , local_cxy );

          // disable WTI in local XCU controller
...
      {
          printk("\n[WARNING] in %s : no handler for HWI %d on core %d in cluster %x\n",
-                __FUNCTION__ , index , core->lid , local_cxy );
-
-         core->spurious_irqs ++;
+         __FUNCTION__ , index , core->lid , local_cxy );

          // disable HWI in local XCU controller
...
          index = pti_status - 1;

-         assert( __FUNCTION__, (index == core->lid) , "unconsistent PTI index\n");
+         assert( __FUNCTION__, (index == core->lid),
+         "unconsistent PTI index\n");

  #if DEBUG_HAL_IRQS
...
      soclib_pic_cluster_t * cluster_ext_ptr;
      soclib_pic_core_t    * core_ext_ptr;
-     kmem_req_t             req;
      uint32_t               lid;
      uint32_t               idx;
...
      {
          // allocate memory for core extension
-         req.type     = KMEM_KCM;
-         req.order    = bits_log2( sizeof(soclib_pic_core_t) );
-         req.flags    = AF_KERNEL;
-         core_ext_ptr = kmem_alloc( &req );
+         core_ext_ptr = kmem_alloc( bits_log2( sizeof(soclib_pic_core_t)) , AF_KERNEL );

          if( core_ext_ptr == NULL )
...
      // allocate memory for cluster extension
-     req.type        = KMEM_KCM;
-     req.order       = bits_log2( sizeof(soclib_pic_cluster_t) );
-     req.flags       = AF_KERNEL;
-     cluster_ext_ptr = kmem_alloc( &req );
+     cluster_ext_ptr = kmem_alloc( bits_log2( sizeof(soclib_pic_cluster_t) ), AF_KERNEL );

      if( cluster_ext_ptr == NULL )
      {
...
      }

-     assert( __FUNCTION__, (cluster_ext_ptr != NULL) , "cannot allocate memory for cluster extension");
-
      // get XCU characteristics from the XCU config register
      uint32_t config = xcu_base[XCU_CONFIG<<5];
...
      bool_t   is_rx   = src_chdev->is_rx;

-     if( (func == DEV_FUNC_IOC && impl == IMPL_IOC_BDV) || (func == DEV_FUNC_NIC) ||
-         (func == DEV_FUNC_TXT && impl == IMPL_TXT_TTY) || (func == DEV_FUNC_IOB) )    // external IRQ => WTI
+     if( ((func == DEV_FUNC_IOC) && (impl == IMPL_IOC_BDV)) ||
+          (func == DEV_FUNC_NIC)                            ||
+         ((func == DEV_FUNC_TXT) && (impl == IMPL_TXT_TTY)) ||
+          (func == DEV_FUNC_IOB) )                             // external IRQ => WTI
      {
          // get external IRQ index
          uint32_t hwi_id = 0;
          if     (  func == DEV_FUNC_IOC )             hwi_id = iopic_input.ioc[channel];
-         else if(  func == DEV_FUNC_TXT && is_rx )    hwi_id = iopic_input.txt_rx[channel];
-         else if(  func == DEV_FUNC_TXT && !is_rx )   hwi_id = iopic_input.txt_tx[channel];
+         else if( (func == DEV_FUNC_TXT) && is_rx )   hwi_id = iopic_input.txt_rx[channel];
+         else if( (func == DEV_FUNC_TXT) && !is_rx )  hwi_id = iopic_input.txt_tx[channel];
          else if( (func == DEV_FUNC_NIC) && is_rx )   hwi_id = iopic_input.nic_rx[channel];
          else if( (func == DEV_FUNC_NIC) && !is_rx )  hwi_id = iopic_input.nic_tx[channel];
          else if(  func == DEV_FUNC_IOB )             hwi_id = iopic_input.iob;
-         else     assert( __FUNCTION__, false , "illegal device functionnal type\n");
+         else
+         {
+             printk("\n[WARNING] from %s : illegal device / func %s / is_rx %d\n",
+             __FUNCTION__, chdev_func_str(func), is_rx );
+         }

          // get a WTI mailbox from local XCU descriptor
...
  #if DEBUG_HAL_IRQS
  if( DEBUG_HAL_IRQS < cycle )
- printk("\n[DBG] %s : %s / channel = %d / rx = %d / hwi_id = %d / wti_id = %d / cluster = %x\n",
+ printk("\n[DBG] %s : %s / channel %d / rx %d / hwi_id %d / wti_id %d / cluster %x\n",
  __FUNCTION__ , chdev_func_str( func ) , channel , is_rx , hwi_id , wti_id , local_cxy );
  #endif

      }
-     else if( (func == DEV_FUNC_DMA) || (func == DEV_FUNC_MMC) ||
+     else if( (func == DEV_FUNC_DMA) ||
+              (func == DEV_FUNC_MMC) ||
               (func == DEV_FUNC_TXT && impl == IMPL_TXT_MTY) ||
               (func == DEV_FUNC_IOC && impl == IMPL_IOC_SPI) )   // internal IRQ => HWI
      {
          // get internal IRQ index
          uint32_t hwi_id;
-         if     ( func == DEV_FUNC_DMA ) hwi_id = lapic_input.dma[channel];
+         if     ( func == DEV_FUNC_DMA )  hwi_id = lapic_input.dma[channel];
          else if( func == DEV_FUNC_TXT )  hwi_id = lapic_input.mtty;
          else if( func == DEV_FUNC_IOC )  hwi_id = lapic_input.sdcard;
-         else                            hwi_id = lapic_input.mmc;
+         else                             hwi_id = lapic_input.mmc;

          // register IRQ type and index in chdev
...
      else
      {
-         assert( __FUNCTION__, false , "illegal device functionnal type\n" );
+         printk("\n[WARNING] from %s : illegal device / func %s / is_rx %d / impl %d\n",
+         __FUNCTION__, chdev_func_str(func), is_rx, impl );
      }
  }  // end soclib_pic_bind_irq();
...
          // in TSAR : XCU output [4*lid] is connected to core [lid]
          hal_remote_s32( XPTR( src_chdev_cxy ,
-              &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
      }
      else if( irq_type == SOCLIB_TYPE_WTI )
...
          // in TSAR : XCU output [4*lid] is connected to core [lid]
          hal_remote_s32( XPTR( src_chdev_cxy ,
-              &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
      }
      else
      {
-         assert( __FUNCTION__, false , "illegal IRQ type\n" );
+         printk("\n[WARNING] from %s : illegal IRQ type %d\n",
+         __FUNCTION__, irq_type );
      }
  }  // end soclib_pic_enable_irq()
...
          // in TSAR : XCU output [4*lid] is connected to core [lid]
          hal_remote_s32( XPTR( src_chdev_cxy ,
-              &seg_xcu_ptr[(XCU_MSK_HWI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[(XCU_MSK_HWI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
      }
      else if( irq_type == SOCLIB_TYPE_WTI )
...
          // in TSAR : XCU output [4*lid] is connected to core [lid]
          hal_remote_s32( XPTR( src_chdev_cxy ,
-              &seg_xcu_ptr[(XCU_MSK_WTI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
+         &seg_xcu_ptr[(XCU_MSK_WTI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
      }
      else
      {
-         assert( __FUNCTION__, false , "illegal IRQ type\n" );
+         printk("\n[WARNING] from %s : illegal IRQ type %d\n",
+         __FUNCTION__, irq_type );
      }
  }  // end soclib_pic_enable_irq()
...
  }

- /////////////////////////
+ ///////////////////////////////
  void soclib_pic_ack_ipi( void )
  {
...
      uint32_t ack = base[ (XCU_WTI_REG << 5) | lid ];

-     // we must make a fake use for ack value to avoid a warning
+     // we make a fake use for ack value to avoid a warning
      if( (ack + 1) == 0 ) asm volatile( "nop" );
  }
trunk/hal/x86_64/core/hal_context.c
(r457 → r686)

      /* Switch the VM space */
      if (newproc != oldproc) {
-         lcr3((uint64_t)newproc->vmm.gpt.ppn << CONFIG_PPM_PAGE_SHIFT);
+         lcr3((uint64_t)newproc->vmm.gpt.ppn << CONFIG_PPM_PAGE_ORDER);
      }
trunk/hal/x86_64/core/hal_exception.c
(r457 → r686)

      error = vmm_handle_page_fault(process,
-         bad_vaddr >> CONFIG_PPM_PAGE_SHIFT);
+         bad_vaddr >> CONFIG_PPM_PAGE_ORDER);

      x86_printf("VA=%Z ERROR=%Z\n", bad_vaddr, (uint64_t)error);
trunk/hal/x86_64/core/hal_gpt.c
(r635 → r686)

      L4dst = (pt_entry_t *)ppm_page2base(page_xp);
      memcpy(&L4dst[256], &L4src[256], 256 * sizeof(pt_entry_t));
-     L4dst[L4_SLOT_PTE] = (ppm_page2ppn(page_xp) << CONFIG_PPM_PAGE_SHIFT) |
+     L4dst[L4_SLOT_PTE] = (ppm_page2ppn(page_xp) << CONFIG_PPM_PAGE_ORDER) |
          PG_V | PG_KW | PG_NX;
...
  error_t hal_gpt_set_pte(gpt_t *gpt, vpn_t vpn, uint32_t attr, ppn_t ppn)
  {
-     vaddr_t va = vpn << CONFIG_PPM_PAGE_SHIFT;
+     vaddr_t va = vpn << CONFIG_PPM_PAGE_ORDER;
      paddr_t pa;
      kmem_req_t req;
...
      }

-     pa = ppn << CONFIG_PPM_PAGE_SHIFT;
+     pa = ppn << CONFIG_PPM_PAGE_ORDER;
      L1_BASE[pl1_i(va)] = pa | hal_gpt_attr_to_pte(attr);
...
  void hal_gpt_get_pte(gpt_t *gpt, vpn_t vpn, uint32_t *attr, ppn_t *ppn)
  {
-     vaddr_t va = vpn << CONFIG_PPM_PAGE_SHIFT;
+     vaddr_t va = vpn << CONFIG_PPM_PAGE_ORDER;

      *attr = 0;
...
          /* large page */
          *attr = hal_gpt_pte_to_attr(&L2_BASE[pl2_i(va)]);
-         *ppn = (L2_BASE[pl2_i(va)] & PG_2MFRAME) >> CONFIG_PPM_PAGE_SHIFT;
+         *ppn = (L2_BASE[pl2_i(va)] & PG_2MFRAME) >> CONFIG_PPM_PAGE_ORDER;
      } else {
          /* small page */
          *attr = hal_gpt_pte_to_attr(&L1_BASE[pl1_i(va)]);
-         *ppn = (L1_BASE[pl1_i(va)] & PG_FRAME) >> CONFIG_PPM_PAGE_SHIFT;
+         *ppn = (L1_BASE[pl1_i(va)] & PG_FRAME) >> CONFIG_PPM_PAGE_ORDER;
      }
trunk/hal/x86_64/core/hal_ppm.c
(r457 → r686)

      // compute number of pages required to store page descriptor array
-     uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;
+     uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_ORDER;

      // compute total number of reserved pages (kernel code & pages_tbl[])