- Timestamp: May 29, 2019, 3:26:44 PM (6 years ago)
- Location: trunk
- Files: 8 edited
trunk/hal/tsar_mips32/core/hal_irqmask.c
r632 → r634

    "mfc0 $1, $12    \n"   /* $1 <= c0_sr      */
    "or   %0, $0, $1 \n"   /* old <= $1        */
  - "ori  $1, $1, 0x1    \n"   /* set IE bit in $1 */
  + "ori  $1, $1, 0xFF01 \n"   /* set IE bit in $1 */
    "mtc0 $1, $12    \n"   /* c0_sr <= $1      */
    ".set at         \n"
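The fix widens the constant OR-ed into the MIPS32 c0_sr status register: 0x1 sets only the IE bit (bit 0), while 0xFF01 also sets the eight IM interrupt-mask bits (bits 8-15), so hardware and software interrupt lines are actually unmasked when interrupts are re-enabled. The sketch below only illustrates that pattern; it is not the exact ALMOS-MKH prototype, and the clobber list is an assumption added to make it self-contained.

    /* Minimal sketch (assumed prototype, not copied from the ALMOS-MKH HAL). */
    static inline void sketch_enable_irq( unsigned int * old )
    {
        unsigned int sr;
        asm volatile(
            ".set noat             \n"
            "mfc0  $1, $12         \n"   /* $1 <= c0_sr                    */
            "or    %0, $0, $1      \n"   /* save previous status register  */
            "ori   $1, $1, 0xFF01  \n"   /* set IE (bit 0) and IM[7:0]     */
            "mtc0  $1, $12         \n"   /* c0_sr <= $1                    */
            ".set at               \n"
            : "=r"(sr) : : "$1", "memory" );

        if( old != 0 ) *old = sr;        /* return previous c0_sr on demand */
    }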
trunk/kernel/fs/vfs.c
r633 → r634

    gid_t gid = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->gid ) );

  - // FIXME: me must use mode
  + // TODO : me must use mode
    if( (uid == client_uid) || (gid == client_gid) ) return false;
    else return true;
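The re-tagged comment points at a known limitation: the check only compares the inode uid/gid with the client uid/gid and ignores the mode bits entirely. The snippet below is purely hypothetical, one possible shape of the mode-aware test the TODO asks for; none of it is defined by this changeset.

    /* Hypothetical sketch only: the changeset does not define this logic.
     * 'mode' is the inode rwxrwxrwx field, 'rights' the requested access bits. */
    static int sketch_access_denied( unsigned int mode,
                                     unsigned int uid,        unsigned int gid,
                                     unsigned int client_uid, unsigned int client_gid,
                                     unsigned int rights )
    {
        unsigned int allowed;

        if     ( uid == client_uid ) allowed = (mode >> 6) & 0x7;   /* owner bits */
        else if( gid == client_gid ) allowed = (mode >> 3) & 0x7;   /* group bits */
        else                         allowed =  mode       & 0x7;   /* other bits */

        return ( (rights & allowed) != rights );   /* non-zero <=> access denied */
    }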
trunk/kernel/kernel_config.h
r633 → r634

    *  kernel_config.h - global kernel configuration arguments
    *
  - *  Authors  Mohamed Lamine Karaoui (2015)
  - *           Alain Greiner (2016,2017,2018,2019)
  + *  Authors  Alain Greiner (2016,2017,2018,2019)
    *
    *  Copyright (c) UPMC Sorbonne Universites
  ...
    #define DEBUG_DEVFS_GLOBAL_INIT          0
  - #define DEBUG_DEVFS_LOCAL_INIT           1
  + #define DEBUG_DEVFS_LOCAL_INIT           0
    #define DEBUG_DEVFS_MOVE                 0
  ...
    #define DEBUG_KMEM                       0
  - #define DEBUG_KERNEL_INIT                1
  + #define DEBUG_KERNEL_INIT                0
    #define DEBUG_MAPPER_GET_PAGE            0
  ...
    #define DEBUG_PPM_ALLOC_PAGES            0
    #define DEBUG_PPM_FREE_PAGES             0
  + #define DEBUG_PPM_REMOTE_ALLOC_PAGES     0
  + #define DEBUG_PPM_REMOTE_FREE_PAGES      0
    #define DEBUG_PROCESS_COPY_INIT          0
  ...
    #define DEBUG_QUEUELOCK_CXY              0
  - #define DEBUG_RPC_CLIENT_GENERIC         2
  - #define DEBUG_RPC_SERVER_GENERIC         2
  + #define DEBUG_RPC_CLIENT_GENERIC         0
  + #define DEBUG_RPC_SERVER_GENERIC         0
    #define DEBUG_RPC_KCM_ALLOC              0
  ...
    #define DEBUG_RPC_THREAD_USER_CREATE     0
    #define DEBUG_RPC_THREAD_KERNEL_CREATE   0
  - #define DEBUG_RPC_VFS_DENTRY_CREATE      2
  + #define DEBUG_RPC_VFS_DENTRY_CREATE      0
    #define DEBUG_RPC_VFS_DENTRY_DESTROY     0
    #define DEBUG_RPC_VFS_DEVICE_GET_DENTRY  0
  ...
    #define DEBUG_USER_DIR                   0
  - #define DEBUG_VFS_ADD_CHILD              1
  + #define DEBUG_VFS_ADD_CHILD              0
    #define DEBUG_VFS_ADD_SPECIAL            0
    #define DEBUG_VFS_CHDIR                  0
  ...
    #define DEBUG_VMM_DESTROY                0
    #define DEBUG_VMM_FORK_COPY              0
  - #define DEBUG_VMM_GET_ONE_PPN            2
  + #define DEBUG_VMM_GET_ONE_PPN            0
    #define DEBUG_VMM_GET_PTE                0
  - #define DEBUG_VMM_HANDLE_PAGE_FAULT      2
  + #define DEBUG_VMM_HANDLE_PAGE_FAULT      19000000
    #define DEBUG_VMM_HANDLE_COW             0
    #define DEBUG_VMM_MMAP_ALLOC             0
  - #define DEBUG_VMM_PAGE_ALLOCATE          2
  + #define DEBUG_VMM_PAGE_ALLOCATE          0
    #define DEBUG_VMM_REMOVE_VSEG            0
    #define DEBUG_VMM_RESIZE_VSEG            0
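These DEBUG_* values are not simple on/off switches. As the ppm.c and vmm.c hunks below show, a value of 0 compiles the trace out, a non-zero value N only prints once the current cycle exceeds N (hence 19000000 for the page-fault trace), and an odd value additionally enables the verbose display block. A condensed sketch of the consumer side, reusing kernel symbols visible in this changeset (hal_get_cycles, printk, ppm_remote_display, local_cxy) and an illustrative macro name:

    #define DEBUG_EXAMPLE  19000000        /* illustrative value, not a real config entry */

    void sketch_traced_function( void )
    {
    #if DEBUG_EXAMPLE                      /* 0 => trace compiled out             */
        uint32_t cycle = (uint32_t)hal_get_cycles();
        if( DEBUG_EXAMPLE < cycle )        /* start tracing after cycle 19000000  */
            printk("\n[%s] enter / cycle %d\n", __FUNCTION__, cycle );
    #endif

    #if (DEBUG_EXAMPLE & 0x1)              /* odd value => extra verbose display  */
        if( DEBUG_EXAMPLE < cycle )
            ppm_remote_display( local_cxy );
    #endif

        /* ... function body ... */
    }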
trunk/kernel/mm/page.c
r632 → r634

                        uint32_t   value )
    {
  -     hal_atomic_and( (uint32_t *)&page->flags , ~((uint32_t)value) );
  +     hal_atomic_and( (uint32_t *)&page->flags , ~value );
    }
  ...
        page_t * page_ptr = GET_PTR( page_xp );

  -     hal_remote_atomic_and( XPTR( page_cxy , &page_ptr->flags ) , value );
  +     hal_remote_atomic_and( XPTR( page_cxy , &page_ptr->flags ) , ~value );
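The second hunk is the real fix: the remote flag-clear AND-ed the flags word with the mask itself, which wipes every flag except the ones requested, instead of AND-ing with the complement. A standalone illustration of the difference (the PG_* values below are illustrative, not the actual ALMOS-MKH encoding):

    #include <stdint.h>
    #include <stdio.h>

    #define PG_INIT   0x0001    /* illustrative flag values */
    #define PG_FREE   0x0002
    #define PG_DIRTY  0x0020

    /* clearing a flag = atomic AND with the complement of the mask,
     * which is what hal_remote_atomic_and( ... , ~value ) now does */
    static void clear_flag( uint32_t * flags , uint32_t value )
    {
        __atomic_fetch_and( flags , ~value , __ATOMIC_SEQ_CST );
    }

    int main( void )
    {
        uint32_t flags = PG_INIT | PG_FREE | PG_DIRTY;
        clear_flag( &flags , PG_FREE );
        printf( "flags = 0x%x\n" , flags );   /* 0x21 : PG_INIT and PG_DIRTY survive */
        return 0;
    }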
trunk/kernel/mm/ppm.c
r632 → r634

    ////////////////////////////////////////////////////////////////////////////////////////
  + // global variables
  + ////////////////////////////////////////////////////////////////////////////////////////
  +
  + extern chdev_directory_t  chdev_dir;      // allocated in kernel_init.c
  +
  + ////////////////////////////////////////////////////////////////////////////////////////
    // functions to translate [ page <-> base <-> ppn ]
    ////////////////////////////////////////////////////////////////////////////////////////
  -
  ...
    if( DEBUG_PPM_ALLOC_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
  - __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
  + __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
    #endif
  ...
    if( DEBUG_PPM_ALLOC_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
  - __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
  + __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
    #endif
  ...
    printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid,
  - 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), cxy, cycle );
  + 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), local_cxy, cycle );
    #endif
  ...
    page_t * found_block;

  - #if DEBUG_PPM_ALLOC_PAGES
  + #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    thread_t * this  = CURRENT_THREAD;
    uint32_t   cycle = (uint32_t)hal_get_cycles();
  - if( DEBUG_PPM_ALLOC_PAGES < cycle )
  + if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    #endif

  - #if(DEBUG_PPM_ALLOC_PAGES & 0x1)
  - if( DEBUG_PPM_ALLOC_PAGES < cycle )
  + #if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
  + if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    ppm_remote_display( cxy );
    #endif
  ...
    remote_busylock_release( lock_xp );

  - #if DEBUG_PPM_ALLOC_PAGES
  + #if DEBUG_REMOTE_PPM_ALLOC_PAGES
    cycle = (uint32_t)hal_get_cycles();
  - if( DEBUG_PPM_ALLOC_PAGES < cycle )
  + if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
  ...
    }

  - // update refcount, flags and order fields in found block remote page descriptor
  + // update refcount, flags and order fields in found block
    page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
    page_remote_refcount_up( XPTR( cxy , found_block ) );
  ...
    dqdt_increment_pages( cxy , order );

  - #if DEBUG_PPM_ALLOC_PAGES
  + #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    cycle = (uint32_t)hal_get_cycles();
  - if( DEBUG_PPM_ALLOC_PAGES < cycle )
  + if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid,
  ...
    #endif

  - #if(DEBUG_PPM_ALLOC_PAGES & 0x1)
  - if( DEBUG_PPM_ALLOC_PAGES < cycle )
  + #if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
  + if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    ppm_remote_display( cxy );
    #endif
  ...
    uint32_t   current_order;    // current (merged) block order

  - #if DEBUG_PPM_FREE_PAGES
  + #if DEBUG_PPM_REMOTE_FREE_PAGES
    thread_t * this  = CURRENT_THREAD;
    uint32_t   cycle = (uint32_t)hal_get_cycles();
  - if( DEBUG_PPM_FREE_PAGES < cycle )
  - printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
  + if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
  + printk("\n[%s] thread[%x,%x] enter for %d page(s) / cxy %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid,
    1<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
    #endif

  - #if(DEBUG_PPM_FREE_PAGES & 0x1)
  - if( DEBUG_PPM_FREE_PAGES < cycle )
  + #if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
  + if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    ppm_remote_display( cxy );
    #endif
  ...
    dqdt_decrement_pages( cxy , page->order );

  - #if DEBUG_PPM_FREE_PAGES
  + #if DEBUG_PPM_REMOTE_FREE_PAGES
    cycle = (uint32_t)hal_get_cycles();
  - if( DEBUG_PPM_FREE_PAGES < cycle )
  + if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid,
  ...
    #endif

  - #if(DEBUG_PPM_FREE_PAGES & 0x1)
  - if( DEBUG_PPM_FREE_PAGES < cycle )
  + #if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
  + if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    ppm_remote_display( cxy );
    #endif
  ...
    uint32_t       order;
    list_entry_t * iter;
  - page_t       * page;
  + xptr_t         page_xp;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
  - xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
  -
  - // get lock protecting free lists in remote cluster
  - remote_busylock_acquire( lock_xp );
  -
  - printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );
  + xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
  +
  + // get pointers on TXT0 chdev
  + xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
  + cxy_t     txt0_cxy = GET_CXY( txt0_xp );
  + chdev_t * txt0_ptr = GET_PTR( txt0_xp );
  +
  + // build extended pointer on remote TXT0 lock
  + xptr_t txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
  +
  + // get PPM lock
  + remote_busylock_acquire( ppm_lock_xp );
  +
  + // get TXT0 lock
  + remote_busylock_acquire( txt_lock_xp );
  +
  + nolock_printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
    // get number of free pages for free_list[order] in remote cluster
    uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
  - printk("- order = %d / free_pages = %d\t: ", order , n );
  +
  + nolock_printk("- order = %d / n = %d\t: ", order , n );

    LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
    {
  - page = LIST_ELEMENT( iter , page_t , list );
  - printk("%x," , page - ppm->pages_tbl );
  + // build extended pointer on page descriptor
  + page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
  +
  + // display PPN
  + nolock_printk("%x," , ppm_page2ppn( page_xp ) );
    }

  - printk("\n");
  - }
  -
  - // release lock protecting free lists in remote cluster
  - remote_busylock_release( lock_xp );
  + nolock_printk("\n");
  + }
  +
  + // release TXT0 lock
  + remote_busylock_release( txt_lock_xp );
  +
  + // release PPM lock
  + remote_busylock_release( ppm_lock_xp );
    }
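The rewritten ppm_remote_display() now takes the TXT0 terminal lock once and prints with nolock_printk(), presumably because the regular printk() would try to take that same lock on every line and would let output from other cores interleave with the multi-line dump. A condensed sketch of the idiom, using the field and function names visible in the hunk (not a verbatim copy of the new function):

    void sketch_remote_display( cxy_t cxy )
    {
        ppm_t   * ppm          = &LOCAL_CLUSTER->ppm;
        xptr_t    ppm_lock_xp  = XPTR( cxy , &ppm->free_lock );

        xptr_t    txt0_xp      = chdev_dir.txt_tx[0];
        chdev_t * txt0_ptr     = GET_PTR( txt0_xp );
        xptr_t    txt_lock_xp  = XPTR( GET_CXY( txt0_xp ) , &txt0_ptr->wait_lock );

        remote_busylock_acquire( ppm_lock_xp );   /* freeze the free lists          */
        remote_busylock_acquire( txt_lock_xp );   /* own the terminal for the dump  */

        nolock_printk("\n***** PPM in cluster %x\n", cxy );
        /* ... walk the free lists and print one line per order ... */

        remote_busylock_release( txt_lock_xp );   /* release in reverse order       */
        remote_busylock_release( ppm_lock_xp );
    }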
trunk/kernel/mm/vmm.c
r633 → r634

    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
  ...
    #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    hal_vmm_display( this->process , false );
    #endif
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - uint32_t cycle = (uint32_t)hal_get_cycles();
  - if( vpn == 0x40b )
  - printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
  - __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
  + printk("\n[%s] thread[%x,%x] found vseg %s\n",
  + __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
    #endif
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - cycle = (uint32_t)hal_get_cycles();
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x / cycle %d\n",
  - __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy , cycle );
  + __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy );
    #endif
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  - printk("\n[%s] thread[%x,%x] : access local gpt : local_cxy %x / ref_cxy %x / type %s\n",
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
  + printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s\n",
    __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) );
    #endif
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  - printk("\n[%s] thread[%x,%x] access ref gpt : local_cxy %x / ref_cxy %x / type %s\n",
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
  + printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s\n",
    __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) );
    #endif
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
    __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
    __FUNCTION__, this->process->pid, this->trdid );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
    __FUNCTION__, this->process->pid, this->trdid );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
    __FUNCTION__, this->process->pid, this->trdid );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
    __FUNCTION__, this->process->pid, this->trdid );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
  ...
    #if DEBUG_VMM_HANDLE_PAGE_FAULT
  - if( vpn == 0x40b )
  + if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )
    printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
    __FUNCTION__, vpn, ppn, attr, end_cycle );
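The trace filter changes from the hard-coded page number 0x40b to a cycle threshold; the (vpn == vpn) term is always true and only serves as a placeholder where a specific VPN can be substituted again to narrow the trace. Since the same condition is repeated in every hunk, it could be factored into a helper such as the hypothetical macro below (not part of this changeset or of the ALMOS-MKH sources):

    /* Hypothetical helper, shown only to illustrate the repeated condition. */
    #define VMM_PGFAULT_TRACE( cycle , vpn ) \
        ( ((cycle) > DEBUG_VMM_HANDLE_PAGE_FAULT) && ((vpn) == (vpn)) )

    /* usage, mirroring one of the hunks above:
     *
     * #if DEBUG_VMM_HANDLE_PAGE_FAULT
     * if( VMM_PGFAULT_TRACE( start_cycle , vpn ) )
     * printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     * __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
     * #endif
     */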
trunk/params-hard.mk
r633 → r634

    # Parameters definition for the ALMOS-MKH Makefile

  - ARCH    = /users/enseig/alain/soc/tsar/trunk/platforms/tsar_generic_iob
  + ARCH    = /Users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
    X_SIZE  = 1
    Y_SIZE  = 2
trunk/params-soft.mk
r633 → r634

    # define absolute path to almos-mkh directory
  - ALMOSMKH_DIR = /users/enseig/alain/soc/almos-mkh/trunk
  + ALMOSMKH_DIR = /Users/alain/soc/almos-mkh/

    # Select the libc