Changeset 50 for trunk/kernel
- Timestamp: Jun 26, 2017, 3:15:11 PM
- Location: trunk/kernel
- Files: 1 added, 1 deleted, 23 edited
trunk/kernel/devices/dev_ioc.c
r23 → r50

      */

+   #include <hard_config.h>
    #include <kernel_config.h>
    #include <hal_types.h>
…
    #if USE_IOB    // software L2/L3 cache coherence for memory buffer

-   if ( type == IOC_READ )     dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
-   else                        dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
+   if ( cmd_type == IOC_READ ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
+   else                        dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );

    #endif // end software L2/L3 cache coherence
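A note on the count<<9 factor in these calls (illustrative helper, not part of the changeset): IOC transfers are counted in 512-byte sectors, and 512 = 2^9, so shifting the sector count left by 9 yields the byte size that the MMC coherence functions expect.

    #include <stdint.h>

    #define SECTOR_SIZE_LOG2  9   /* 512-byte sectors */

    static inline uint32_t sectors_to_bytes( uint32_t count )
    {
        return count << SECTOR_SIZE_LOG2;   /* count * 512 */
    }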
trunk/kernel/drivers/soclib/soclib_mmc.c
r4 → r50

    // get command type
-   uint32_t cc_cmd = MMC_CC_INVAL ? SOCLIB_MMC_CC_INVAL : SOCLIB_MMC_CC_SYNC;
+   uint32_t cc_cmd;
+   if( type == MMC_CC_INVAL ) cc_cmd = SOCLIB_MMC_CC_INVAL;
+   else                       cc_cmd = SOCLIB_MMC_CC_SYNC;

    // set SOCLIB_MMC registers to start INVAL/SYNC operation
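The bug fixed here is worth spelling out: the old ternary tested the constant MMC_CC_INVAL itself, which is always non-zero, so cc_cmd was unconditionally set to SOCLIB_MMC_CC_INVAL whatever operation was requested. A self-contained reduction (values illustrative):

    #include <stdio.h>

    #define MMC_CC_INVAL 1
    #define MMC_CC_SYNC  2

    int main( void )
    {
        int type = MMC_CC_SYNC;

        /* buggy : a non-zero constant is always true */
        int buggy = MMC_CC_INVAL ? 10 : 20;

        /* fixed : compare the request type to the constant */
        int fixed = (type == MMC_CC_INVAL) ? 10 : 20;

        printf( "buggy = %d / fixed = %d\n", buggy, fixed );   /* 10 / 20 */
        return 0;
    }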
trunk/kernel/kern/cluster.c
r23 → r50

    #include <hal_atomic.h>
    #include <hal_special.h>
+   #include <hal_ppm.h>
    #include <printk.h>
    #include <errno.h>
…
    #include <dqdt.h>

-   // TODO #include <sysfs.h>
-
    ///////////////////////////////////////////////////////////////////////////////////////////
    // Extern global variables
…
    error_t cluster_init( struct boot_info_s * info )
    {
+       error_t     error;
        lpid_t      lpid;     // local process_index
        lid_t       lid;      // local core index
…
        spinlock_init( &cluster->kcm_lock );

+       cluster_dmsg("\n[INFO] %s for cluster %x enters\n",
+                    __FUNCTION__ , local_cxy );
+
        // initialises DQDT
        cluster->dqdt_root_level = dqdt_init( info->x_size,
…
        // initialises embedded PPM
-       ppm_init( &cluster->ppm,
-                 info->pages_nr,
-                 info->pages_offset );
+       error = hal_ppm_init( info );
+
+       if( error )
+       {
+           printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
+                  __FUNCTION__ , local_cxy );
+           return ENOMEM;
+       }
+
+       cluster_dmsg("\n[INFO] %s : PPM initialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );

        // initialises embedded KHM
        khm_init( &cluster->khm );

+       cluster_dmsg("\n[INFO] %s : KHM initialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );
+
        // initialises embedded KCM
        kcm_init( &cluster->kcm , KMEM_KCM );
+
+       cluster_dmsg("\n[INFO] %s : KCM initialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );

        // initialises all cores descriptors
…
        }

+       cluster_dmsg("\n[INFO] %s : cores initialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );
+
        // initialises RPC fifo
        rpc_fifo_init( &cluster->rpc_fifo );
+
+       cluster_dmsg("\n[INFO] %s : RPC fifo inialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );

        // initialise pref_tbl[] in process manager
…
            xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
        }
+
+       cluster_dmsg("\n[INFO] %s Process Manager initialized in cluster %x at cycle %d\n",
+                    __FUNCTION__ , local_cxy , hal_time_stamp() );

        hal_wbflush();
trunk/kernel/kern/cluster.h
r23 → r50

        pmgr_t      pmgr;      /*! embedded process manager */
-
-       char        name[CONFIG_SYSFS_NAME_LEN];
-
-       // sysfs_entry_t node;
    }
    cluster_t;
trunk/kernel/kern/do_syscall.c
r23 → r50

        sys_chmod,           // 31
        sys_signal,          // 32
-       sys_gettimeofday,    // 33
+       sys_timeofday,       // 33
        sys_kill,            // 34
        sys_getpid,          // 35
trunk/kernel/kern/kernel_init.c
r25 → r50

    #include <devfs.h>

-   // TODO #include <sysfs.h>

    #define KERNEL_INIT_SYNCHRO  0xA5A5B5B5
…
    __attribute__((section(".kdata")))
    barrier_t  local_barrier  CONFIG_CACHE_LINE_ALIGNED;

+   // This variable defines the array of supported File System contexts
+   __attribute__((section(".kdata")))
+   vfs_ctx_t  fs_context[FS_TYPES_NR]  CONFIG_CACHE_LINE_ALIGNED;
+
    ///////////////////////////////////////////////////////////////////////////////////////////
…
        }

-       kinit_dmsg("\n[INFO] %s : core[%x][0] creates ICU chdev at cycle %d\n",
+       kinit_dmsg("\n[INFO] %s : core[%x][0] created ICU chdev at cycle %d\n",
                   __FUNCTION__ , local_cxy , hal_time_stamp() );
…
        }

-       kinit_dmsg("\n[INFO] %s : core[%x][0] creates MMC chdev at cycle %d\n",
+       kinit_dmsg("\n[INFO] %s : core[%x][0] created MMC chdev at cycle %d\n",
                   __FUNCTION__ , local_cxy , hal_time_stamp() );
    }
…
        chdev_dir.dma[channel] = chdev_xp;

-       kinit_dmsg("\n[INFO] %s : core[%x][0] creates DMA[%d] chdev at cycle %d\n",
+       kinit_dmsg("\n[INFO] %s : core[%x][0] created DMA[%d] chdev at cycle %d\n",
                   __FUNCTION__ , local_cxy , channel , hal_time_stamp() );
    }
…
    //
    // TODO check that cluster IO contains a PIC [AG]
+   // TODO make a default initialisation for the chdev_dir structure (XPTR_NULL ) [AG]
    ///////////////////////////////////////////////////////////////////////////////////////////
    // @ info : pointer on the local boot-info structure.
…
    // CP0 allocates one WTI mailbbox per core for Inter Processor Interrupt
    // this must be done after ICU chdev initialisation, by CP0 only, and before
-   // external devices initialisation to enforce the rule (wti_id == lid)
+   // external devices initialisation to enforce the rule :
+   // "The WTI index for the IPI routed to core[lid] is lid"
    if( core_lid == 0 )
    {
…
    }

-   // CP0 contribute to initialise external peripheral chdev descriptors.
+   // All CP0s contribute to initialise external peripheral chdev descriptors.
    // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0),
    // and allocates memory for the chdev descriptors that must be placed
…
    }

-   printk("\n bloup 0\n");
-
    // CP0 in all clusters initializes cooperatively VFS and DEVFS
    if( (core_lid == 0) )
    {
        xptr_t  root_inode_xp;

-       // initialize root File System
+       // initialize root File System (must be FATFS in this implementation)
        if( CONFIG_VFS_ROOT_IS_FATFS )
        {
…
        }

-       printk("\n bloup 1\n");
-
        // mount the DEVFS File system
        devfs_mount( root_inode_xp , "dev" );
    }
-
-   printk("\n bloup 2\n");

    // CP0 in I/O cluster print banner
…
    // each core jump to idle thread
-   // asm volatile( "j thread_idle_func\n");
+   thread_idle_func();

    }  // end kernel_init()
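What the second TODO suggests, sketched (chdev_dir's exact type is not shown in this diff; the directory is assumed to contain only xptr_t entries): initialise every entry to XPTR_NULL, so that later code such as devfs_mount() can test device existence with (chdev_xp != XPTR_NULL), as the TODO added in devfs.c below also requires.

    // sketch, not committed code : reset all extended pointers in the directory
    static void chdev_dir_default_init( void * dir, uint32_t size )
    {
        uint32_t   i;
        xptr_t   * entry = (xptr_t *)dir;

        for( i = 0 ; i < size / sizeof(xptr_t) ; i++ )
        {
            entry[i] = XPTR_NULL;
        }
    }

It would be called once by CP0 before any chdev is created, e.g. chdev_dir_default_init( &chdev_dir , sizeof(chdev_dir) ).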
trunk/kernel/kern/printk.h
r23 → r50

    ///////////////////////////////////////////////////////////////////////////////////

+   #if CONFIG_CLUSTER_DEBUG
+   #define cluster_dmsg(...) printk(__VA_ARGS__)
+   #else
+   #define cluster_dmsg(...)
+   #endif
+
    #if CONFIG_CONTEXT_DEBUG
    #define context_dmsg(...) printk(__VA_ARGS__)
…
    #endif

+   #if CONFIG_DEVFS_DEBUG
+   #define devfs_dmsg(...) printk(__VA_ARGS__)
+   #else
+   #define devfs_dmsg(...)
+   #endif
+
+   #if CONFIG_DMA_DEBUG
+   #define dma_dmsg(...) printk(__VA_ARGS__)
+   #else
+   #define dma_dmsg(...)
+   #endif
+
    #if CONFIG_DQDT_DEBUG
    #define dma_dmsg(...) printk(__VA_ARGS__)
…
    #endif

+   #if CONFIG_FATFS_DEBUG
+   #define fatfs_dmsg(...) printk(__VA_ARGS__)
+   #else
+   #define fatfs_dmsg(...)
+   #endif
+
    #if CONFIG_FBF_DEBUG
    #define fbf_dmsg(...) printk(__VA_ARGS__)
…
    #else
    #define icu_dmsg(...)
+   #endif
+
+   #if CONFIG_IDLE_DEBUG
+   #define idle_dmsg(...) printk(__VA_ARGS__)
+   #else
+   #define idle_dmsg(...)
    #endif
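The pattern behind all these channels: a per-subsystem variadic macro that compiles to printk() or to nothing. Adding a channel for a hypothetical "foo" subsystem (CONFIG_FOO_DEBUG is an assumed flag, not part of this changeset) would follow the same recipe:

    #if CONFIG_FOO_DEBUG
    #define foo_dmsg(...) printk(__VA_ARGS__)
    #else
    #define foo_dmsg(...)
    #endif

With the flag off, a call site such as foo_dmsg("...", x) expands to nothing, so neither the format string nor its arguments cost anything at run time.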
trunk/kernel/kern/thread.c
r23 → r50

        while( 1 )
        {
-           thread_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
+           idle_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
                      __FUNCTION__ , local_cxy , lid , hal_time_stamp() );
…
            hal_core_sleep();

-           thread_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
+           idle_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
                      __FUNCTION__ , local_cxy , lid , hal_time_stamp() );
trunk/kernel/libk/remote_rwlock.c
r23 → r50

    #include <hal_irqmask.h>
    #include <thread.h>
+   #include <printk.h>
    #include <cluster.h>
    #include <scheduler.h>
…
        // compute extended pointers on lock->ticket, lock->owner and thread->remote_locks
-       xptr_t current_xp = XPTR( lock_cxy   , &lock_ptr->ticket );
+       xptr_t current_xp = XPTR( lock_cxy   , &lock_ptr->current );
        xptr_t owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
        xptr_t locks_xp   = XPTR( thread_cxy , &thread_ptr->remote_locks );
…
    }  // end remote_rwlock_wr_unlock()

+   ///////////////////////////////////////////
+   void remote_rwlock_print( xptr_t   lock_xp,
+                             char   * comment )
+   {
+       uint32_t ticket;    // first free ticket index
+       uint32_t current;   // ticket index of current owner
+       uint32_t count;     // current number of reader threads
+       xptr_t   owner;     // extended pointer on writer thread
+
+       // get cluster and local pointer on remote_rwlock
+       remote_rwlock_t * lock_ptr = (remote_rwlock_t *)GET_PTR( lock_xp );
+       cxy_t             lock_cxy = GET_CXY( lock_xp );
+
+       ticket  = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->ticket ) );
+       current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) );
+       count   = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) );
+       owner   = hal_remote_lwd( XPTR( lock_cxy , &lock_ptr->owner ) );
+
+       printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d / owner = %l\n",
+              lock_xp , comment , ticket , current , count , owner );
+
+   }  // end remote_rwlock_print()
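A reduced sketch of the ticket discipline behind this fix (field names from remote_rwlock_t, logic shown local and non-atomic for clarity): "ticket" is the next free ticket handed to new requesters, "current" is the ticket being served, and a release must advance "current". Writing through a pointer to "ticket" instead, as the old code did, advances the wrong counter and never serves the next waiter.

    typedef struct sketch_rwlock_s
    {
        uint32_t ticket;    // next free ticket index (taken at acquire)
        uint32_t current;   // ticket index of the current owner
    }
    sketch_rwlock_t;

    static void sketch_wr_unlock( sketch_rwlock_t * lock )
    {
        lock->current ++;   // serve the next waiting ticket
    }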
trunk/kernel/libk/remote_rwlock.h
r23 → r50

    void remote_rwlock_wr_unlock( xptr_t lock_xp );

+   /***************************************************************************************
+    * Display the lock state on kernel TTY.
+    ***************************************************************************************
+    * @ lock_xp  : extended pointer on the remote rwlock
+    * @ comment  : comment to be printed.
+    **************************************************************************************/
+   void remote_rwlock_print( xptr_t   lock_xp,
+                             char   * comment );
+
    #endif
trunk/kernel/libk/xhtab.c
r23 → r50

                     xptr_t   xlist_xp )
    {
-
-       printk("\n @@@ xhtab_insert : 0 / name = %s / xhtab_xp = %l / xlist_xp = %l\n",
-              key , xhtab_xp , xlist_xp );
-
        // get xhtab cluster and local pointer
        cxy_t      xhtab_cxy = GET_CXY( xhtab_xp );
…
        uint32_t index = xhtab_ptr->index( key );

-       printk("\n @@@ xhtab_insert : 1 / name = %s / index = %d\n",
-              key , index );
-
        // take the lock protecting hash table
        remote_rwlock_wr_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
…
            remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );

-           printk("\n @@@ xhtab_insert : 2 / name = %s / item_xp = %l\n",
-                  key , item_xp );
-
            return EINVAL;
        }
…
        // release the lock protecting hash table
        remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
-
-       printk("\n @@@ xhtab_insert : 3 / name = %s / item_xp = %l\n",
-              key , xhtab_ptr->scan( xhtab_xp , index , key ) );

        return 0;
trunk/kernel/mm/kcm.c
r20 → r50

    * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
-   *         Alain Greiner (2016)
+   *         Alain Greiner (2016,2017)
    *
    * Copyright (c) UPMC Sorbonne Universites
…
    // It changes the page status if required.
    //////////////////////////////////////////////////////////////////////////////////////
-   // @ kcm      : pointer on kcm allocator.
-   // @ ptr      : pointer on active kcm page to use.
+   // @ kcm      : pointer on kcm allocator.
+   // @ kcm_page : pointer on active kcm page to use.
    /////////////////////////////////////////////////////////////////////////////////////
    static void * kcm_get_block( kcm_t      * kcm,
-                                kcm_page_t * page )
-   {
-       assert( page->active , __FUNCTION__ , "kcm page should be active" );
+                                kcm_page_t * kcm_page )
+   {
+       kcm_dmsg("\n[INFO] %s : enters for %s / page %x / count = %d / active = %d\n",
+                __FUNCTION__ , kmem_type_str( kcm->type ) ,
+                (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
+
+       assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );

        // get first block available
-       int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );
-
-       assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );
+       int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );
+
+       assert( (index != -1) , __FUNCTION__ , "kcm_page should not be full" );

        // allocate block
-       bitmap_clear( page->bitmap , index );
-
-       // increase page refcount
-       page->refcount ++;
-
-       // change the page to busy if no more free block in page
-       if( page->refcount >= kcm->blocks_nr )
-       {
-           page->active = 0;
-           list_unlink( &page->list);
+       bitmap_clear( kcm_page->bitmap , index );
+
+       // increase kcm_page count
+       kcm_page->count ++;
+
+       // change the kcm_page to busy if no more free block in page
+       if( kcm_page->count >= kcm->blocks_nr )
+       {
+           kcm_page->active = 0;
+           list_unlink( &kcm_page->list);
            kcm->active_pages_nr --;

-           list_add_first( &kcm->busy_root , &page->list);
+           list_add_first( &kcm->busy_root , &kcm_page->list);
            kcm->busy_pages_nr ++;
-           page->busy = 1;
-       }
-
-       return (page->base + index * kcm->block_size );
+           kcm_page->busy = 1;
+       }
+
+       // compute return pointer
+       void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE
+                    + (index * kcm->block_size) );
+
+       kcm_dmsg("\n[INFO] %s : allocated one block %s / ptr = %x / page = %x / count = %d\n",
+                __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr ,
+                (intptr_t)kcm_page , kcm_page->count );
+
+       return ptr;

    }  // kcm_get_block()

    /////////////////////////////////////////////////////////////////////////////////////
    // This static function releases a previously allocated block.
-   // It changes the page status if required.
+   // It changes the kcm_page status if required.
    /////////////////////////////////////////////////////////////////////////////////////
    // @ kcm   : pointer on kcm allocator.
…
                              void  * ptr )
    {
-       kcm_page_t * page;
+       kcm_page_t * kcm_page;
        uint32_t     index;

-       page  = (kcm_page_t*)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
-       index = ((uint8_t*)ptr - page->base) / kcm->block_size;
-
-       bitmap_set( page->bitmap , index );
-       page->refcount --;
+       // compute pointer on kcm_page from block pointer
+       kcm_page = (kcm_page_t*)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
+
+       // compute block index from block pointer
+       index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;
+
+       bitmap_set( kcm_page->bitmap , index );
+       kcm_page->count --;

        // change the page to active if it was busy
-       if( page->busy )
-       {
-           page->busy = 0;
-           list_unlink( &page->list );
+       if( kcm_page->busy )
+       {
+           kcm_page->busy = 0;
+           list_unlink( &kcm_page->list );
            kcm->busy_pages_nr --;

-           list_add_last( &kcm->active_root, &page->list );
+           list_add_last( &kcm->active_root, &kcm_page->list );
            kcm->active_pages_nr ++;
-           page->active = 1;
-       }
-
-       // change the page to free if last block in active page
-       if( (page->active) && (page->refcount == 0) )
-       {
-           page->active = 0;
-           list_unlink( &page->list);
+           kcm_page->active = 1;
+       }
+
+       // change the kcm_page to free if last block in active page
+       if( (kcm_page->active) && (kcm_page->count == 0) )
+       {
+           kcm_page->active = 0;
+           list_unlink( &kcm_page->list);
            kcm->active_pages_nr --;

-           list_add_first( &kcm->free_root , &page->list);
+           list_add_first( &kcm->free_root , &kcm_page->list);
            kcm->free_pages_nr ++;
        }
…
    /////////////////////////////////////////////////////////////////////////////////////
    // This static function allocates one page from PPM. It initializes
-   // the KCM-page descriptor, and introduces the new page into freelist.
+   // the kcm_page descriptor, and introduces the new kcm_page into freelist.
    /////////////////////////////////////////////////////////////////////////////////////
    static error_t freelist_populate( kcm_t * kcm )
    {
        page_t     * page;
-       kcm_page_t * ptr;
+       kcm_page_t * kcm_page;
        kmem_req_t   req;
…
        // get page base address
-       ptr = ppm_page2base( page );
+       kcm_page = (kcm_page_t *)ppm_page2base( page );

        // initialize KCM-page descriptor
-       bitmap_set_range( ptr->bitmap , 0 , kcm->blocks_nr );
-
-       ptr->busy     = 0;
-       ptr->active   = 0;
-       ptr->refcount = 0;
-       ptr->base     = (uint8_t*)ptr + kcm->block_size;
-       ptr->kcm      = kcm;
-       ptr->page     = page;
+       bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr );
+
+       kcm_page->busy   = 0;
+       kcm_page->active = 0;
+       kcm_page->count  = 0;
+       kcm_page->kcm    = kcm;
+       kcm_page->page   = page;

        // introduce new page in free-list
-       list_add_first( &kcm->free_root , &ptr->list );
+       list_add_first( &kcm->free_root , &kcm_page->list );
        kcm->free_pages_nr ++;
…
    {
        error_t      error;
-       kcm_page_t * page;
+       kcm_page_t * kcm_page;

        // get a new page from PPM if freelist empty
…
        }

-       // get first KCM page from freelist and change its status to active
-       page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
-       list_unlink( &page->list );
+       // get first KCM page from freelist and unlink it
+       kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
+       list_unlink( &kcm_page->list );
        kcm->free_pages_nr --;

-       return page;
+       return kcm_page;

    }  // freelist_get()
…
                   uint32_t   type )
    {
-       uint32_t   blocks_nr;
-       uint32_t   block_size;
-       uint32_t   remaining;
+       // the kcm_page descriptor mut fit in the KCM slot
+       assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) ,
+               __FUNCTION__ , "KCM slot too small\n" );

        // initialize lock
…
        list_root_init( &kcm->active_root );

-       // initialize block size and number of blocks per page
-       block_size = ARROUND_UP( kmem_type_size( type ) , 64 );
-       blocks_nr  = CONFIG_PPM_PAGE_SIZE / block_size;
-       remaining  = CONFIG_PPM_PAGE_SIZE % block_size;
-       blocks_nr  = (remaining >= sizeof(kcm_page_t)) ? blocks_nr : blocks_nr - 1;
-
-       kcm->blocks_nr  = blocks_nr;
+       // initialize block size
+       uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE );
        kcm->block_size = block_size;

+       // initialize number of blocks per page
+       uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
+       kcm->blocks_nr = blocks_nr;
+
        kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
-                __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );
+                __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );

    }  // kcm_init()
…
    void kcm_destroy( kcm_t * kcm )
    {
-       kcm_page_t   * page;
+       kcm_page_t   * kcm_page;
        list_entry_t * iter;
…
        LIST_FOREACH( &kcm->free_root , iter )
        {
-           page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
+           kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
            list_unlink( iter );
            kcm->free_pages_nr --;
-           ppm_free_pages( page->page );
+           ppm_free_pages( kcm_page->page );
        }
…
        LIST_FOREACH( &kcm->active_root , iter )
        {
-           page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
+           kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
            list_unlink( iter );
            kcm->free_pages_nr --;
-           ppm_free_pages( page->page );
+           ppm_free_pages( kcm_page->page );
        }
…
        LIST_FOREACH( &kcm->busy_root , iter )
        {
-           page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
+           kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
            list_unlink( iter );
            kcm->free_pages_nr --;
-           ppm_free_pages( page->page );
+           ppm_free_pages( kcm_page->page );
        }
…
    void * kcm_alloc( kcm_t * kcm )
    {
-       kcm_page_t * page;
+       kcm_page_t * kcm_page;
        void       * ptr = NULL;   // pointer on block
…
        if( list_is_empty( &kcm->active_root ) )   // no active page => get one
        {
-           kcm_dmsg("\n[INFO] %s : enters for type %s but no active page => get one\n",
-                    __FUNCTION__ , kmem_type_str( kcm->type ) );
-
            // get a page from free list
-           page = freelist_get( kcm );
-           if( page == NULL ) return NULL;
+           kcm_page = freelist_get( kcm );
+
+           if( kcm_page == NULL ) return NULL;

            // insert page in active list
-           list_add_first( &kcm->active_root , &page->list );
+           list_add_first( &kcm->active_root , &kcm_page->list );
            kcm->active_pages_nr ++;
-           page->active = 1;
-       }
-       else                                      // get first page from active list
-       {
-           kcm_dmsg("\n[INFO] %s : enters for type %s with an active page\n",
-                    __FUNCTION__ , kmem_type_str( kcm->type ) );
-
+           kcm_page->active = 1;
+
+           kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
+                    __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() ,
+                    (intptr_t)kcm_page , kcm_page->count );
+       }
+       else                                      // get first page from active list
+       {
            // get page pointer from active list
-           page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
+           kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
+
+           kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
+                    __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() ,
+                    (intptr_t)kcm_page , kcm_page->count );
        }

        // get a block from selected active page
        // cannot fail, as an active page cannot be full...
-       ptr = kcm_get_block( kcm , page );
+       ptr = kcm_get_block( kcm , kcm_page );

        // release lock
-       spinlock_unlock(&kcm->lock);
-
-       kcm_dmsg("\n[INFO] %s : allocated one block of type %s / ptr = %x\n",
-                __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr );
+       spinlock_unlock( &kcm->lock );

        return ptr;

-   }  // kcm_alloc()
+   }  // end kcm_allo()

    ///////////////////////////
    void kcm_free( void * ptr )
    {
-       kcm_page_t * page;
+       kcm_page_t * kcm_page;
        kcm_t      * kcm;

-       if( ptr == NULL ) return;
-
-       page = (kcm_page_t *)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
-       kcm  = page->kcm;
+       assert( (ptr != NULL) , __FUNCTION__ , "pointer cannot be NULL" );
+
+       kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
+       kcm      = kcm_page->kcm;

        // get lock
…
        // release lock
        spinlock_unlock( &kcm->lock );
-   }
+
+   }  // end kcm_free()

    ////////////////////////////
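The address arithmetic introduced here, in isolation (constants assumed: 4 Kbytes pages, 64-byte KCM slots). The kcm_page descriptor occupies the first slot of its page, block <i> starts at SLOT_SIZE + i * block_size, and masking off the low page bits of any block pointer recovers the descriptor:

    #include <stdint.h>

    #define PAGE_MASK  0xFFF   /* analogue of CONFIG_PPM_PAGE_MASK */
    #define SLOT_SIZE  64      /* analogue of CONFIG_KCM_SLOT_SIZE */

    static inline void * block_address( void     * page_base,
                                        uint32_t   index,
                                        uint32_t   block_size )
    {
        return (void *)((intptr_t)page_base + SLOT_SIZE + index * block_size);
    }

    static inline void * page_descriptor( void * block )
    {
        /* note the ~ : masking with PAGE_MASK instead of ~PAGE_MASK was
         * precisely the bug fixed in kcm_free() above */
        return (void *)((intptr_t)block & ~PAGE_MASK);
    }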
trunk/kernel/mm/kcm.h
r23 → r50

    * This structure defines a generic Kernel Cache Manager, that is a block allocator,
    * for fixed size objects. It exists a specific KCM allocator for each object type.
-   * The actual allocated block size is the smallest multiple of 64 bytes that can
-   * contain one single object.
+   * The actual allocated block size is the smallest multiple of the KCM slot that can
+   * contain one single object. The KCM slot is typically 64 bytes, as it must be large
+   * enough to store the kcm_page descriptor, defined below.
    * The various KCM allocators themselves are not statically allocated in the cluster
    * manager, but are dynamically allocated when required, using the embedded KCM
…
    {
        spinlock_t     lock;          /*! protect exclusive access to allocator */
-       uint32_t       block_size;    /*! actual block size (bytes)             */
-       uint32_t       blocks_nr;     /*! number of blocks per page             */
+       uint32_t       block_size;    /*! rounded block size (bytes)            */
+       uint32_t       blocks_nr;     /*! max number of blocks per page         */

        list_entry_t   active_root;   /*! root of active pages list             */
…
    /****************************************************************************************
     * This structure defines a KCM-page descriptor.
-    * A KCM-page can contain up to (CONFIG_PPM_PAGE_SIZE / CONFIG_CACHE_LINE_SIZE) blocks.
+    * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) blocks.
     * This kcm page descriptor is stored in the first slot of the page.
     ***************************************************************************************/

    typedef struct kcm_page_s
    {
-       uint32_t       bitmap[BITMAP_SIZE(CONFIG_KCM_BLOCKS_MAX)];
-       uint8_t      * base;        /*! pointer on first block in page           */
-       kcm_t        * kcm;         /*! owner KCM allocator                      */
+       uint32_t       bitmap[2];   /*! at most 64 blocks in a single page       */
        list_entry_t   list;        /*! [active / busy / free] list member       */
+       kcm_t        * kcm;         /*! pointer on kcm allocator                 */
        page_t       * page;        /*! pointer on the physical page descriptor */
-       uint8_t        refcount;    /*! number of allocated blocks               */
-       uint8_t        busy;        /*! page busy if non zero                    */
-       uint8_t        active;      /*! page active if non zero                  */
-       uint8_t        unused;      /*!                                          */
+       uint32_t       count;       /*! number of allocated blocks               */
+       uint32_t       busy;        /*! page busy if non zero                    */
+       uint32_t       active;      /*! page active if non zero                  */
    }
    kcm_page_t;
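A quick consistency check on the new descriptor: bitmap[2] gives 2 * 32 = 64 bits, one per block, matching the maximum of 4096 / 64 = 64 blocks per page. The run-time assert added to kcm_init() covers the other direction: the descriptor itself must fit in the first 64-byte slot. A C11 sketch with stand-in types (pointer sizes vary, hence the assumed two-pointer list_entry_t):

    #include <stdint.h>

    typedef struct { void * next; void * pred; } list_entry_sketch_t;

    typedef struct kcm_page_sketch_s
    {
        uint32_t              bitmap[2];   // one bit per block, 64 max
        list_entry_sketch_t   list;
        void                * kcm;
        void                * page;
        uint32_t              count;
        uint32_t              busy;
        uint32_t              active;
    }
    kcm_page_sketch_t;

    /* the descriptor must fit in the first 64-byte slot of the page */
    _Static_assert( sizeof(kcm_page_sketch_t) <= 64,
                    "kcm_page descriptor must fit in one KCM slot" );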
trunk/kernel/mm/kmem.c
r23 → r50

    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
+
+   else if( type == KMEM_512_BYTES )     return 512;
+
    else                                  return 0;
…
    else if( type == KMEM_SEM )           return "KMEM_SEM";
    else if( type == KMEM_SEM )           return "KMEM_CONDVAR";
+
+   else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
+
    else                                  return "undefined";
…
    assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );

-   kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s / size %d\n",
-            __FUNCTION__ , local_cxy , kmem_type_str( type ) , size );
+   kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s\n",
+            __FUNCTION__ , local_cxy , kmem_type_str( type ) );

    // analyse request type
…
        ptr = (void *)ppm_alloc_pages( size );

-       // reset page if required
+       // reset page if requested
        if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
…
        if( flags & AF_ZERO ) memset( ptr , 0 , size );

-       kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
-                __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
+       kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
+                __FUNCTION__, local_cxy , kmem_type_str( type ) ,
+                (intptr_t)ptr , req->size );
    }
    else    // KCM allocator
…
        if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );

-       kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
-                __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
+       kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
+                __FUNCTION__, local_cxy , kmem_type_str( type ) ,
+                (intptr_t)ptr , kmem_type_size( type ) );
    }
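Typical use of the new KMEM_512_BYTES type; this is exactly the pattern the fatfs.c change below adopts for the boot record (kmem_req_t fields as defined in kmem.h):

    static uint8_t * alloc_sector_buffer( void )
    {
        kmem_req_t req;

        req.type  = KMEM_512_BYTES;
        req.flags = AF_KERNEL | AF_ZERO;     // kernel memory, zero-filled
        return (uint8_t *)kmem_alloc( &req );
    }

    static void free_sector_buffer( uint8_t * buffer )
    {
        kmem_req_t req;

        req.type = KMEM_512_BYTES;           // must match the allocation type
        req.ptr  = buffer;
        kmem_free( &req );
    }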
trunk/kernel/mm/kmem.h
r23 → r50

        KMEM_CONDVAR    = 19,   /*! remote_condvar_t  */

-       KMEM_TYPES_NR   = 19,
+       KMEM_512_BYTES  = 20,   /*! 512 bytes aligned */
+
+       KMEM_TYPES_NR   = 21,
    };
trunk/kernel/mm/ppm.c
r18 → r50

    * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
-   *          Alain Greiner (2016)
+   *          Alain Greiner (2016,2017)
    *
    * Copyright (c) UPMC Sorbonne Universites
…
-   ////////////////////////////////////////////
-   inline void * ppm_page2base( page_t * page )
+   /////////////////////////////////////////////
+   inline void * ppm_page2vaddr( page_t * page )
    {
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
-       return (void*)((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
-   }
-
-   ////////////////////////////////////////////
-   inline page_t * ppm_base2page( void * base )
+       return ppm->vaddr_base + ((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
+   }
+
+   //////////////////////////////////////////////
+   inline page_t * ppm_vaddr2page( void * vaddr )
    {
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
-       return (ppm->pages_tbl + (((uint32_t)base ) >> CONFIG_PPM_PAGE_SHIFT));
-   }
+       return ppm->pages_tbl + (vaddr - ppm->vaddr_base);
+   }
…
    ///////////////////////////////////////
-   inline void * ppm_ppn2base( ppn_t ppn )
-   {
-       return (void*)( ppn << CONFIG_PPM_PAGE_SHIFT );
-   }
-
-   ////////////////////////////////////////
-   inline ppn_t ppm_base2ppn( void * base )
-   {
-       return (ppn_t)( (uint32_t)base >> CONFIG_PPM_PAGE_SHIFT );
-   }
-
-   //////////////////////////////////////////////////
-   static void ppm_free_pages_nolock( page_t * page )
+   inline void * ppm_ppn2vaddr( ppn_t ppn )
+   {
+       ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+       return ppm->vaddr_base + (ppn << CONFIG_PPM_PAGE_SHIFT);
+   }
+
+   //////////////////////////////////////////
+   inline ppn_t ppm_vaddr2ppn( void * vaddr )
+   {
+       ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+       return ( (ppm->vaddr_base - vaddr) >> CONFIG_PPM_PAGE_SHIFT );
+   }
+
+   ///////////////////////////////////////////
+   void ppm_free_pages_nolock( page_t * page )
    {
        page_t   * buddy;            // searched buddy page descriptor
…
        page_t   * current;          // current (merged) page descriptor
        uint32_t   current_index;    // current (merged) page index
-       uint32_t   current_order;    // current (merget) page order
+       uint32_t   current_order;    // current (merged) page order

        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
…
            list_unlink( &buddy->list );
            ppm->free_pages_nr[current_order] --;
-           ppm->total_free_pages -= (1 << current_order);

            // merge buddy with current
…
        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
        ppm->free_pages_nr[current_order] ++;
-       ppm->total_free_pages += (1 << current_order);

    }  // end ppm_free_pages_nolock()

-   //////////////////////////////
-   void ppm_init( ppm_t    * ppm,
-                  uint32_t   pages_nr,       // total pages number
-                  uint32_t   pages_offset )  // occupied pages
-   {
-       uint32_t i;
-
-       // set signature
-       ppm->signature = PPM_SIGNATURE;
-
-       // initialize lock protecting the free_pages[] array
-       spinlock_init( &ppm->free_lock );
-
-       // initialize free_pages[] array as empty
-       ppm->total_free_pages = 0;
-       for( i = 0 ; i < CONFIG_PPM_MAX_ORDER ; i++ )
-       {
-           list_root_init( &ppm->free_pages_root[i] );
-           ppm->free_pages_nr[i] = 0;
-       }
-
-       // initialize dirty_list as empty
-       list_root_init( &ppm->dirty_root );
-
-       // initialize pointer on page descriptors array
-       ppm->pages_tbl = (page_t*)( pages_offset << CONFIG_PPM_PAGE_SHIFT );
-
-       // compute size of pages descriptor array rounded to an integer number of pages
-       uint32_t bytes = ARROUND_UP( pages_nr * sizeof(page_t), CONFIG_PPM_PAGE_SIZE );
-
-       // compute number of pages required to store page descriptor array
-       uint32_t pages_array = bytes >> CONFIG_PPM_PAGE_SHIFT;
-
-       // compute total number of reserved pages (kernel code & pages_tbl[])
-       uint32_t reserved_pages = pages_offset + pages_array;
-
-       // set pages numbers
-       ppm->pages_nr     = pages_nr;
-       ppm->pages_offset = reserved_pages;
-
-       // initialises all page descriptors in pages_tbl[]
-       for( i = 0 ; i < pages_nr ; i++ )
-       {
-           page_init( &ppm->pages_tbl[i] );
-
-           // TODO optimisation : make only a partial init [AG]
-           // complete the initialisation when page is allocated [AG]
-           // ppm->pages_tbl[i].flags = 0;
-       }
-
-       // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
-       // - release all other pages to populate the free lists
-       for( i = 0 ; i < reserved_pages ; i++)
-       {
-           page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
-       }
-       for( i = reserved_pages ; i < pages_nr ; i++ )
-       {
-           ppm_free_pages_nolock( &ppm->pages_tbl[i] );
-
-           // TODO optimisation : decompose this enormous set of small pages
-           // to a set big pages with various order values
-       }
-
-       // check consistency
-       ppm_assert_order( ppm );
-
-   }  // end ppm_init()
…
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

-       assert( (ppm->signature == PPM_SIGNATURE) , __FUNCTION__ , "PPM non initialised" );
-
        assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );
…
        ppm_dmsg("\n[INFO] %s : enters / order = %d\n",
                 __FUNCTION__ , order );

-   #if( CONFIG_PPM_DEBUG )
-       ppm_print( ppm , "before allocation" );
-   #endif
-
        // take lock protecting free lists
…
        // update free-lists after removing a block
-       ppm->total_free_pages -= (1 << current_order);
        ppm->free_pages_nr[current_order] --;
        current_size = (1 << current_order);
…
            list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
            ppm->free_pages_nr[current_order] ++;
-           ppm->total_free_pages += (1 << current_order);
        }
…
                 __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );

-   #if CONFIG_PPM_DEBUG
-       ppm_print( ppm , "after allocation" );
-   #endif
-
        return block;
    }  // end pmm_alloc-pages()
…
        spinlock_lock( &ppm->free_lock );

-       printk("\n*** PPM state in cluster %x %s : pages = %d / offset = %d / free = %d ***\n",
-              local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
+       printk("\n*** PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
+              local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );

        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
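For reference, the buddy arithmetic that ppm_free_pages_nolock() relies on (standard buddy-allocator identities, not altered by this commit; shown as index math only):

    #include <stdint.h>

    /* the buddy of the block starting at page index <i>, of order <o>,
     * starts at index i XOR 2^o */
    static inline uint32_t buddy_index( uint32_t index, uint32_t order )
    {
        return index ^ (1 << order);
    }

    /* after a merge, the resulting order <o>+1 block starts at the lower
     * of the two buddy indices */
    static inline uint32_t merged_index( uint32_t index, uint32_t order )
    {
        return index & ~(1 << order);
    }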
trunk/kernel/mm/ppm.h
r18 → r50

    #include <page.h>

-   #define PPM_SIGNATURE 0xBABEF00D

    /*****************************************************************************************
     * This structure defines the Physical Memory Manager in a cluster.
-    * In all clusters, the physical memory bank starts at address 0.
-    * The segments kcode and kdata are mapped in the first "offset" pages.
-    * The physical page descriptors array is implemented just after this offset zone.
-    * The main service provided by the PMM is the dynamic allocation of physical pages.
+    * In all clusters, the physical memory bank starts at local physical address 0.
+    * The size of this local physical memory is defined by the <pages_nr> field in the
+    * boot_info structure. It is split in three parts:
+    * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
+    *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
+    *   boot_info structure.
+    * - the "pages_tbl" section contains the physical page descriptors array. It starts
+    *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
+    *   It is created and initialized by the hal_ppm_create() function.
+    * - the "kernel_heap" section contains all physical pages that are not in the
+    *   kernel_code and pages_tbl sections, and that have not been reserved by the
+    *   architecture specific bootloader. The reserved pages are defined in the boot_info
+    *   structure.
+    *
+    * The main service provided by the PMM is the dynamic allocation of physical pages
+    * from the "kernel_heap" section.
     * This low-level allocator implements the buddy algorithm: an allocated block is
-    * is an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
+    * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
     ****************************************************************************************/

    typedef struct ppm_s
    {
-       uint32_t       signature;        /*! set when initialised                      */
-       spinlock_t     free_lock;        /*! lock protecting free_pages[] array        */
+       spinlock_t     free_lock;        /*! lock protecting free_pages[] lists        */
        list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists */
        uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages */
-       uint32_t       total_free_pages; /*! total number of free pages                */
        page_t       * pages_tbl;        /*! pointer on page descriptors array         */
-       uint32_t       pages_nr;         /*! total number of 4 Kbytes physical page    */
-       uint32_t       pages_offset;     /*! allocated pages for kcode & kdata         */
-       uint32_t       pages_desc;       /*! allocated pages for pages_tbl[] array     */
-       spinlock_t     dirty_lock;       /*! lock protecting the dirty list            */
+       uint32_t       pages_nr;         /*! total number of small physical page       */
+       spinlock_t     dirty_lock;       /*! lock protecting the dirty pages list      */
        list_entry_t   dirty_root;       /*! root of dirty pages list                  */
+       void         * vaddr_base;       /*! pointer on local physical memory base     */
    }
    ppm_t;
…
     * @ order    : ln2( number of 4 Kbytes pages)
     * @ returns a pointer on the page descriptor if success / NULL otherwise
     ****************************************************************************************/
    page_t * ppm_alloc_pages( uint32_t order );
…
    /*****************************************************************************************
-    * This function check if a page descriptor is valid.
+    * This function check if a page descriptor pointer is valid.
     *****************************************************************************************
     * @ page   : pointer on a page descriptor
…
    /*****************************************************************************************
-    * Get the page base address from the page descriptor pointer.
+    * Get the page virtual address from the page descriptor pointer.
     *****************************************************************************************
     * @ page   : pointer to page descriptor
-    * @ returns page base address
+    * @ returns virtual address of page itself.
     ****************************************************************************************/
-   inline void* ppm_page2base( page_t * page );
+   inline void* ppm_page2vaddr( page_t * page );

    /*****************************************************************************************
-    * Get the page descriptor pointer from the page base address.
+    * Get the page descriptor pointer from the page virtual address.
     *****************************************************************************************
-    * @ vaddr  : page base address
+    * @ vaddr  : page virtual address
     * @ returns pointer on page descriptor
     ****************************************************************************************/
-   inline page_t * ppm_base2page( void * vaddr );
+   inline page_t * ppm_vaddr2page( void * vaddr );
…
    /*****************************************************************************************
-    * Get the page base address from the PPN.
+    * Get the page virtual address from the PPN.
     *****************************************************************************************
     * @ ppn    : physical page number
-    * @ returns page base address
+    * @ returns page virtual address.
     ****************************************************************************************/
-   inline void* ppm_ppn2base( ppn_t ppn );
+   inline void* ppm_ppn2vaddr( ppn_t ppn );

    /*****************************************************************************************
-    * Get the PPN from the page base address.
+    * Get the PPN from the page virtual address.
     *****************************************************************************************
-    * @ vaddr  : page base address
-    * @ returns physical page number
+    * @ vaddr  : page virtual address
+    * @ returns physical page number.
     ****************************************************************************************/
-   inline ppn_t ppm_base2ppn( void * base );
+   inline ppn_t ppm_vaddr2ppn( void * base );
trunk/kernel/mm/vmm.c
r23 → r50

    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );

-   if( local_cxy == GET_CXY( process->ref_xp) )
+   if( local_cxy == GET_CXY( process->ref_xp) )   // calling process is reference process
    {
        error = vmm_get_pte( process, vpn , &attr , &ppn );
    }
-   else                                           // use a RPC
+   else                                           // calling process is not reference process
    {
        cxy_t ref_cxy = GET_CXY( process->ref_xp );
trunk/kernel/syscalls/sys_thread_sleep.c
r23 → r50

    int sys_thread_sleep()
    {
-       thread_t * this = CURRENT_THREAD;
-
        thread_dmsg("\n[INFO] %s : thread %x in process %x goes to sleep at cycle %d\n",
-           __FUNCTION__ , this->trdid , this->process->pid , hal_time_stamp() );
+           __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_time_stamp() );

        thread_block( CURRENT_THREAD , THREAD_BLOCKED_GLOBAL );
…
        thread_dmsg("\n[INFO] %s : thread %x in process %x resume at cycle\n",
-           __FUNCTION__ , this->trdid , this->process->pid , hal_time_stamp() );
+           __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_time_stamp() );

        return 0;
trunk/kernel/syscalls/syscalls.h
r23 → r50

     * @ return 0 if success / returns -1 if failure.
     ********************************************************************************************/
-   int sys_gettimeofday( struct timeval  * tv,
-                         struct timezone * tz );
+   int sys_timeofday( struct timeval  * tv,
+                      struct timezone * tz );
trunk/kernel/vfs/devfs.c
r23 → r50

        xptr_t      new_inode_xp;

-       printk("\n @@@ devfs_chdev : 0 / name = %s\n", name );
+       devfs_dmsg("\n[INFO] %s : create dentry for %s\n", __FUNCTION__ , name );

        // create vfs_dentry in local cluster
        error = vfs_dentry_create( FS_TYPE_DEVFS,
                                   name,
                                   parent,
                                   &new_dentry_xp );
-
-       printk("\n @@@ devfs_chdev : 1 / name = %s\n", name );
-
        if ( error )
        {
…
        }

-       printk("\n @@@ devfs_chdev : 2 / name = %s\n", name );
+       devfs_dmsg("\n[INFO] %s : create inode for %s\n", __FUNCTION__ , name );

        // create vfs_inode in local cluster
        uint32_t attr = 0;
…
                                  gid,
                                  &new_inode_xp );
-
-       printk("\n @@@ devfs_chdev : 3 / name = %s\n", name );
-
        if( error )
        {
…
        ///// step 1 : all clusters initialize local DEVFS context /////

-       printk("\n @@@ devfs_mount : 0 / name = %s\n", devfs_root_name );
-
        devfs_ctx_init( vfs_ctx , parent_inode_xp );

        ///// step 2 : cluster_0 creates DEVFS root /////
-
-       printk("\n @@@ devfs_mount : 1 / name = %s\n", devfs_root_name );

        if( local_cxy == 0 )
        {
…
                                    parent_inode_xp,
                                    &root_inode_xp );
-           printk("\n @@@ devfs_mount : 2\n");
-
        }
…
        ///// step 3 : all clusters create "internal" directory and chdevs /////

-       printk("\n @@@ devfs_mount : 3 / name = %s\n", devfs_root_name );
+       // TODO check device existence : (chdev_xp != XPTR_NULL) in chdev_dir

        snprintf( node_name , 16 , "internal_%x" , local_cxy );
-
-       printk("\n @@@ devfs_mount : 4 / name = %s\n", devfs_root_name );

        devfs_create_directory( node_name,
                                root_inode_xp,
                                &internal_inode_xp );
-
-       printk("\n @@@ devfs_mount : 5 / name = %s\n", devfs_root_name );

        // create ICU chdev inode
…
                            &chdev_inode_xp );

-       printk("\n @@@ devfs_mount : 6 / name = %s\n", devfs_root_name );
-
        // create MMC chdev inode
        chdev_ptr = (chdev_t *)GET_PTR( chdev_dir.mmc[local_cxy] );
…
                            &chdev_inode_xp );

-       printk("\n @@@ devfs_mount : 7 / name = %s\n", devfs_root_name );
-
        // create DMA chdev inodes (one DMA channel per core)
        for( channel = 0 ; channel < cluster->cores_nr ; channel++ )
        {
…
                                (vfs_inode_t *)GET_PTR( internal_inode_xp ),
                                &chdev_inode_xp );
-
-           printk("\n @@@ devfs_mount : 8 / name = %s\n", devfs_root_name );
-
        }

        ///// step 4 : cluster_io creates "external" directory and chdevs /////
+
+       // TODO check device existence : (chdev_xp != XPTR_NULL) in chdev_dir

        if( local_cxy == cluster->io_cxy )
        {
…
                                    root_inode_xp,
                                    &external_inode_xp );
-
-           printk("\n @@@ devfs_mount : 9 / name = %s\n", devfs_root_name );

            // create IOB chdev inode
…
                                &chdev_inode_xp );

-           printk("\n @@@ devfs_mount : 10 / name = %s\n", devfs_root_name );
-
            // create PIC chdev inode
            chdev_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
…
                                (vfs_inode_t *)GET_PTR( external_inode_xp ),
                                &chdev_inode_xp );
-           printk("\n @@@ devfs_mount : 11 / name = %s\n", devfs_root_name );
-
        }
    }
…
-   const struct vfs_file_op_s devfs_f_op =
-   {
-       .open    = devfs_open,
-       .read    = devfs_read,
-       .write   = devfs_write,
-       .lseek   = devfs_lseek,
-       .mmap    = devfs_mmap,
-       .munmap  = devfs_munmap,
-       .readdir = devfs_readdir,
-       .close   = devfs_close,
-       .release = devfs_release
-   };
-
    */
trunk/kernel/vfs/fatfs.c
r23 → r50

    #include <fatfs.h>

+
    //////////////////////////////////////////////////////////////////////////////////////////
    // Extern variables
    //////////////////////////////////////////////////////////////////////////////////////////

-   extern vfs_ctx_t fs_context[FS_TYPES_NR];      // allocated in vfs.c file
-   extern remote_barrier_t global_barrier;        // allocated dans kernel_init.c
+   extern vfs_ctx_t fs_context[FS_TYPES_NR];      // allocated in vfs.c file
+   extern remote_barrier_t global_barrier;        // allocated in kernel_init.c
…
                         xptr_t      root_inode_xp )
    {
-       error_t   error;
-       uint8_t   buffer[512];   // buffer for boot record
-
-       // make a synchronous access to IOC device to read the boot record from device
+       error_t      error;
+       uint8_t    * buffer;
+       kmem_req_t   req;
+
+       // allocate a 512 bytes buffer to store the boot record
+       req.type  = KMEM_512_BYTES;
+       req.flags = AF_KERNEL | AF_ZERO;
+       buffer    = (uint8_t *)kmem_alloc( &req );
+
+       fatfs_dmsg("\n[INFO] %s : enters with buffer = %x\n",
+                  __FUNCTION__ , (intptr_t)buffer );
+
+       // load the boot record from device
+       // using a synchronous access to IOC device
        error = dev_ioc_sync_read( buffer , 0 , 1 );
-       assert( (error == 0) , __FUNCTION__ , "cannot access FAT boot record" );
+
+       assert( (error == 0) , __FUNCTION__ , "cannot access boot record" );
+
+   #if CONFIG_FAT_DEBUG
+       uint32_t line;
+       uint32_t byte = 0;
+       printk("\n*** boot record at cycle %d ***\n", hal_time_stamp() );
+       for ( line = 0 ; line < 32 ; line++ )
+       {
+           printk(" %X | %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x |\n",
+                  byte,
+                  buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
+                  buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
+                  buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
+                  buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
+
+           byte += 16;
+       }
+   #endif

        // check sector size from boot record
        uint32_t sector_size = get_record_from_buffer( BPB_BYTSPERSEC , buffer , 1 );
+
        assert( (sector_size == 512) , __FUNCTION__ , "sector size must be 512 bytes" );

        // check cluster size from boot record
        uint32_t nb_sectors = get_record_from_buffer( BPB_SECPERCLUS , buffer , 1 );
+
        assert( (nb_sectors == 8) , __FUNCTION__ , "cluster size must be 8 sectors" );

        // check number of FAT copies from boot record
        uint32_t nb_fats = get_record_from_buffer( BPB_NUMFATS , buffer , 1 );
+
        assert( (nb_fats == 1) , __FUNCTION__ , "number of FAT copies must be 1" );

        // get & check number of sectors in FAT from boot record
        uint32_t fat_sectors = get_record_from_buffer( BPB_FAT32_FATSZ32 , buffer , 1 );
+
        assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ , "FAT not multiple of 16 sectors");

        // get and check root cluster from boot record
        uint32_t root_cluster = get_record_from_buffer( BPB_FAT32_ROOTCLUS , buffer , 1 );
+
        assert( (root_cluster == 2) , __FUNCTION__ , "Root cluster index must be 2");

        // get FAT lba from boot record
        uint32_t fat_lba = get_record_from_buffer( BPB_RSVDSECCNT , buffer , 1 );

+       // release the 512 bytes buffer
+       req.type = KMEM_512_BYTES;
+       req.ptr  = buffer;
+       kmem_free( &req );
+
        // allocate a mapper for the FAT itself
        mapper_t * fat_mapper = mapper_create();
+
        assert( (fat_mapper != NULL) , __FUNCTION__ , "no memory for FAT mapper" );
…
        fatfs_ctx->fat_mapper_xp = XPTR( local_cxy , fat_mapper );

+       fatfs_dmsg("\n*** FAT context ***\n"
+                  "- fat_sectors     = %d\n"
+                  "- sector size     = %d\n"
+                  "- cluster size    = %d\n"
+                  "- fat_first_lba   = %d\n"
+                  "- data_first_lba  = %d\n"
+                  "- mapper          = %l\n",
+                  fatfs_ctx->fat_sectors_count,
+                  fatfs_ctx->bytes_per_sector,
+                  fatfs_ctx->bytes_per_cluster,
+                  fatfs_ctx->fat_begin_lba,
+                  fatfs_ctx->cluster_begin_lba,
+                  fatfs_ctx->fat_mapper_xp );
+
        // initialize the VFS context
        vfs_ctx->type = FS_TYPE_FATFS;
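What a get_record_from_buffer()-style accessor has to do (sketch; the real helper's exact signature is not shown in this diff, and FAT32 on-disk integers are little-endian per the FAT specification):

    #include <stdint.h>

    static uint32_t get_le_field( const uint8_t * buf,
                                  uint32_t        offset,
                                  uint32_t        nbytes )
    {
        uint32_t value = 0;
        uint32_t i;

        for( i = 0 ; i < nbytes ; i++ )
        {
            value |= (uint32_t)buf[offset + i] << (8 * i);   // little-endian
        }
        return value;
    }

For instance, the 16-bit "bytes per sector" field at offset 11 of the boot record would be read as get_le_field( buffer , 11 , 2 ), and the mount code above requires it to be 512.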
trunk/kernel/vfs/vfs.c
r23 → r50

    //////////////////////////////////////////////////////////////////////////////////////////
-   // Global variables
-   //////////////////////////////////////////////////////////////////////////////////////////
-
-   // array of supported FS contexts
-   vfs_ctx_t fs_context[FS_TYPES_NR];
+   // Extern variables
+   //////////////////////////////////////////////////////////////////////////////////////////
+
+   extern vfs_ctx_t fs_context[FS_TYPES_NR];   // allocate in kernel_init.c

    //////////////////////////////////////////////////////////////////////////////////////////
    // Context related functions
…
        kmem_req_t     req;        // request to kernel memory allocator

-       printk("\n @@@ dentry_create : 0 / name = %s\n", name );
-
        // check type and get pointer on context
        if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
…
        }

-       printk("\n @@@ dentry_create : 1 / name = %s\n", name );
-
        // allocate memory for dentry descriptor
        req.type = KMEM_VFS_DENTRY;
…
        strcpy( dentry->name , name );

-       printk("\n @@@ dentry_create : 2 / name = %s\n", name );
-
        // register dentry in hash table rooted in parent inode
        xhtab_insert( XPTR( local_cxy , &parent->children ),
                      name,
                      XPTR( local_cxy , &dentry->xlist ) );

-       printk("\n @@@ dentry_create : 3 / name = %s\n", name );
-
        // return extended pointer on dentry