Changeset 68 for trunk/kernel
- Timestamp: Jun 27, 2017, 10:24:13 AM (8 years ago)
- Location: trunk/kernel
- Files: 12 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/kernel/devices/dev_ioc.c
(r52 → r68)

      lba , (intptr_t)buffer , hal_time_stamp() );

- #if USE_IOB    // software L2/L3 cache coherence for memory buffer
-
-     if ( cmd_type == IOC_READ ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
-     else                        dev_mmc_sync( XPTR( local_cxy , buffer ) , count<<9 );
-
- #endif     // end software L2/L3 cache coherence
+     // software L2/L3 cache coherence for memory buffer
+     if( chdev_dir.iob )
+     {
+         if ( cmd_type == IOC_READ ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
+         else                        dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
+     }

      // get extended pointer on IOC chdev descriptor
  …
      " completes / error = %d / at cycle %d\n",
      __FUNCTION__ , this->trdid , this->process->pid ,
-     this->dev.ioc.error , hal_time_stamp() );
+     this->command.ioc.error , hal_time_stamp() );

      // return I/O operation status
  …
      thread_t * this = CURRENT_THREAD;

- #if USE_IOB    // software L2/L3 cache coherence for memory buffer
-
-     dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
-
- #endif     // end software L2/L3 cache coherence
+     // software L2/L3 cache coherence for memory buffer
+     if( chdev_dir.iob ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );

      // get extended pointer on IOC[0] chdev
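The change above replaces a compile-time `#if USE_IOB` guard with a runtime test on the IOB entry of the chdev directory, so software L2/L3 coherence requests are only issued when an IO-bridge is actually present. The following standalone sketch illustrates that pattern; the stand-in types and the two mmc_* stubs are assumptions, not the actual ALMOS-MKH definitions.

    /* Minimal sketch : runtime IOB test instead of a compile-time USE_IOB guard. */
    #include <stdint.h>
    #include <stddef.h>

    typedef enum { IOC_READ, IOC_WRITE } ioc_cmd_t;

    /* stand-in for the chdev directory : a non-NULL iob pointer means an IOB exists */
    static struct { void * iob; } chdev_dir;

    static void mmc_inval( void * buf, size_t nbytes ) { (void)buf; (void)nbytes; }
    static void mmc_sync ( void * buf, size_t nbytes ) { (void)buf; (void)nbytes; }

    static void ioc_buffer_coherence( ioc_cmd_t cmd, void * buffer, uint32_t count )
    {
        /* count is a number of 512-byte blocks, hence the << 9 conversion to bytes */
        if( chdev_dir.iob )
        {
            if( cmd == IOC_READ ) mmc_inval( buffer, (size_t)count << 9 );
            else                  mmc_sync ( buffer, (size_t)count << 9 );
        }
    }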
trunk/kernel/kern/core.c
(r23 → r68)

  *
  * Author   Ghassan Almaless (2008,2009,2010,2011,2012)
- *          Mohamed Lamine Karaoui (2015)
- *          Alain Greiner (2016)
+ *          Alain Greiner (2016,2017)
  *
  * Copyright (c) UPMC Sorbonne Universites
  …
      core->rpc_threads = 0;

-     rpc_fifo_init( &core->rpc_fifo );
-
      list_root_init( &core->rpc_free_list );
trunk/kernel/kern/core.h
(r19 → r68)

  *
  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
- *          Mohamed Lamine Karaoui (2015)
- *          Alain Greiner (2016)
+ *          Alain Greiner (2016,2017)
  *
  * Copyright (c) UPMC Sorbonne Universites
  …
      uint32_t          rpc_threads;    /*! total number of RPC threads for this core */
      list_entry_t      rpc_free_list;  /*! root of the list of free RPC threads      */
-     rpc_fifo_t        rpc_fifo;       /*! embedded private RPC fifo (one per core)  */

      scheduler_t       scheduler;      /*! embedded private scheduler                */
  …
      struct chdev_s  * pti_vector[CONFIG_MAX_PTIS_PER_ICU];   /*! on source device   */
      struct chdev_s  * wti_vector[CONFIG_MAX_WTIS_PER_ICU];   /*! on source device   */
-
-     // sysfs_entry_t  node;
  }
  core_t;
trunk/kernel/kern/kernel_init.c
(r50 → r68)

      #include <core.h>
      #include <list.h>
+     #include <xlist.h>
      #include <thread.h>
      #include <scheduler.h>
  …
      #include <soclib_tty.h>
      #include <devfs.h>
+     #include <mapper.h>

  …
      if( core_lid == 0 ) local_cxy = info->cxy;

+     // each core get pointer on its private idle thread descriptor
+     thread = (thread_t *)( idle_threads + (core_lid * CONFIG_THREAD_DESC_SIZE) );
+
+     // each core register this thread pointer in hardware register
+     hal_set_current_thread( thread );
+
      // CP0 in I/O cluster initialises TXT0 chdev descriptor
      if( (core_lid == 0) && (core_cxy == info->io_cxy) ) txt0_device_init( info );
  …
      __FUNCTION__ , core_cxy , core_lid , hal_time_stamp() );

-     // all cores initialize the private idle thread descriptor
-     thread = (thread_t *)( idle_threads + (core_lid * CONFIG_THREAD_DESC_SIZE) );

      error = thread_kernel_init( thread,
  …
                                  NULL,
                                  core_lid );
-
      if( error )
      {
  …
      core->scheduler.idle = thread;

-     // register idle thread pointer in core register
-     hal_set_current_thread( thread );
-
      // activate the idle thread
      thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
  …
  {
      print_banner( (info->x_size * info->y_size) , info->cores_nr );
+
+     kinit_dmsg("\n\n*** memory fooprint of main kernet objects ***\n"
+                " - thread descriptor  : %d bytes\n"
+                " - process descriptor : %d bytes\n"
+                " - cluster manager    : %d bytes\n"
+                " - chdev descriptor   : %d bytes\n"
+                " - core descriptor    : %d bytes\n"
+                " - scheduler          : %d bytes\n"
+                " - rpc fifo           : %d bytes\n"
+                " - page descriptor    : %d bytes\n"
+                " - mapper root        : %d bytes\n"
+                " - ppm manager        : %d bytes\n"
+                " - kcm manager        : %d bytes\n"
+                " - khm manager        : %d bytes\n"
+                " - vmm manager        : %d bytes\n"
+                " - gpt root           : %d bytes\n"
+                " - list item          : %d bytes\n"
+                " - xlist item         : %d bytes\n"
+                " - spinlock           : %d bytes\n"
+                " - remote spinlock    : %d bytes\n"
+                " - rwlock             : %d bytes\n"
+                " - remote rwlock      : %d bytes\n",
+                sizeof( thread_t ),
+                sizeof( process_t ),
+                sizeof( cluster_t ),
+                sizeof( chdev_t ),
+                sizeof( core_t ),
+                sizeof( scheduler_t ),
+                sizeof( rpc_fifo_t ),
+                sizeof( page_t ),
+                sizeof( mapper_t ),
+                sizeof( ppm_t ),
+                sizeof( kcm_t ),
+                sizeof( khm_t ),
+                sizeof( vmm_t ),
+                sizeof( gpt_t ),
+                sizeof( list_entry_t ),
+                sizeof( xlist_entry_t ),
+                sizeof( spinlock_t ),
+                sizeof( remote_spinlock_t ),
+                sizeof( rwlock_t ),
+                sizeof( remote_rwlock_t ));
  }
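The init-path change above has each core derive its private idle-thread descriptor from a single static array, indexed by its local core id, and register it in the hardware current-thread register before any device initialisation. A standalone sketch of that per-core addressing, with assumed sizes and names:

    /* Per-core descriptor addressing : one static array, indexed by local core id. */
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_DESC_SIZE   4096   /* stand-in for CONFIG_THREAD_DESC_SIZE */
    #define CORES_PER_CLUSTER  4      /* stand-in value                       */

    static uint8_t idle_threads[CORES_PER_CLUSTER * THREAD_DESC_SIZE]
                   __attribute__((aligned(THREAD_DESC_SIZE)));

    int main( void )
    {
        for( unsigned lid = 0 ; lid < CORES_PER_CLUSTER ; lid++ )
        {
            void * thread = idle_threads + (lid * THREAD_DESC_SIZE);
            printf("core %u : idle thread descriptor at %p\n", lid, thread);
        }
        return 0;
    }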
trunk/kernel/kern/rpc.c
(r23 → r68)

      reg_t      sr_save;

-     printk("\n@@@ coucou 0\n");
-
      // get client CPU and cluster coordinates
      cxy_t      client_cxy = local_cxy;
  …
      {
          error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ),
-                                       (void *)&xp,
-                                       sizeof(xptr_t),
+                                       (uint64_t *)&xp,
                                        &first );
  …
      while( error );

-     printk("\n@@@ coucou 1\n");
-
      rpc_dmsg("\n[INFO] %s on core %d in cluster %x sent RPC %p to cluster %x\n",
               __FUNCTION__ , client_lid , client_cxy , rpc , server_cxy );
  …
      }

-     printk("\n@@@ coucou 2\n");
-
      // activate preemption to allow incoming RPC and avoid deadlock
      if( this->type == THREAD_RPC ) hal_enable_irq( &sr_save );
  …
          if( rpc->response == 0 ) break;
      }
-
-     printk("\n@@@ coucou 3\n");

      // restore preemption
  …
      {
          error = local_fifo_get_item( &rpc_fifo->fifo,
-                                      &xp,
-                                      sizeof(xptr_t) );
+                                      (uint64_t *)&xp );

          if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
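The client-side pattern visible in this diff is: post one 64-bit extended pointer on the RPC descriptor into the server cluster's FIFO, retrying while the FIFO reports contention, then wait until the server clears the response counter. A minimal sketch of that flow, with a hypothetical try_put() stub standing in for remote_fifo_put_item():

    /* Client-side RPC send pattern, shared-memory sketch only. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    typedef struct { _Atomic uint32_t response; } rpc_desc_t;

    /* stand-in for remote_fifo_put_item() : always succeeds in this sketch */
    static int try_put( uint64_t item, bool * first ) { (void)item; *first = true; return 0; }

    static void rpc_send_sketch( rpc_desc_t * rpc, uint64_t rpc_xptr )
    {
        bool first;
        int  error;

        atomic_store( &rpc->response, 1 );           /* expect one completion        */

        do { error = try_put( rpc_xptr, &first ); }  /* retry while the FIFO is full */
        while( error );

        while( atomic_load( &rpc->response ) != 0 )  /* wait for the server side     */
            ;                                        /* (real code enables IRQs and may yield) */
    }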
trunk/kernel/kern/thread.c
(r60 → r68)

  void thread_idle_func()
  {
+ #if CONFIG_IDLE_DEBUG
      lid_t  lid = CURRENT_CORE->lid;
+ #endif

      while( 1 )
trunk/kernel/libk/remote_fifo.c
(r60 → r68)

  /*
- * remote_fifo.c  Implement a lock-less FIFO,
- *                multiple-remote-writers / single-local-reader
+ * remote_fifo.c  Implement a lock-less FIFO, multiple-remote-writers / single-local-reader
  *
  * Authors : Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016)
+ *           Alain Greiner (2016,2017)
  *
  * Copyright (c) UPMC Sorbonne Universites
  …
      for( slot = 0 ; slot < CONFIG_REMOTE_FIFO_SLOTS ; slot++ )
      {
-         fifo->valid[slot] = false;
+         fifo->valid[slot] = 0;
      }
  }

- ////////////////////////////////////////////
- error_t remote_fifo_put_item( xptr_t     fifo,
-                               void     * item,
-                               uint32_t   size,
-                               bool_t   * first )
+ //////////////////////////////////////////////
+ error_t remote_fifo_put_item( xptr_t     fifo,
+                               uint64_t * item,
+                               bool_t   * first )
  {
      uint32_t   wr_id;
  …
      hal_disable_irq( &save_sr );

-     // get write slot index and increment
+     // get write slot index and atomic increment
      wr_id = hal_remote_atomic_add( XPTR( cxy , &ptr->wr_id ) , 1 );
  …

      // copy item to fifo
-     hal_remote_memcpy( XPTR( cxy , &ptr->data[ptw] ),
-                        XPTR( local_cxy , item ) , size );
+     hal_remote_swd( XPTR( cxy , &ptr->data[ptw] ), *item );
      hal_wbflush();

      // set the slot valid flag
      hal_remote_sw( XPTR( cxy , &ptr->valid[ptw] ) , 1 );
-
      hal_wbflush();
  …
  //////////////////////////////////////////////////
  error_t local_fifo_get_item( remote_fifo_t * fifo,
-                              void          * item,
-                              uint32_t        size )
+                              uint64_t      * item )
  {
      // get fifo state
  …

      // copy item from FIFO to local buffer
-     memcpy( item , &fifo->data[ptr] , size );
+     *item = fifo->data[ptr];

      // reset valid slot flag
trunk/kernel/libk/remote_fifo.h
(r14 → r68)

  /*
- * remote_fifo.h - kernel generic SRMW FIFO
+ * remote_fifo.h - Lock-less Single-Reader Multiple-Writers FIFO
  *
- * Authors : Mohamed Lamine Karaoui / Alain Greiner (2016)
+ * Authors : Mohamed Lamine Karaoui (2015)
+ *           Alain Greiner (2016,2017)
  *
  * Copyright (c) UPMC Sorbonne Universites
  …
  * This structure defines a generic, single reader, multiple writers
  * remote FIFO, that is used by the RPCs for inter cluster communications.
- * The accesses are implemented using a lock-free algorithm, as it
- * uses a ticket based mechanism to handle concurrent access between
- * the multiple writers.
- * Each FIF0 slot can contain one full cache line, even if RPCs store only
- * an extended pointer on the RPC descriptor in each slot.
+ * The accesses are implemented using a lock-free algorithm, as it uses a ticket
+ * based mechanism to handle concurrent access between multiple writers.
+ * Each FIF0 slot can contain one 64 bits integer.
  * In case of FIFO full, the writer deschedule without blocking, to retry later.
  *
  * WARNING : the number of slots is statically defined by the global
  * configuration parameter CONFIG_REMOTE_FIFO_SLOTS for all fifos, requiring
- * CACHE_LINE_SIZE * CONFIG_REMOTE_FIFO_SLOTS bytes for each FIFO...
+ * 12 * CONFIG_REMOTE_FIFO_SLOTS bytes for each FIFO.
  ***********************************************************************************/

  typedef struct remote_fifo_s
  {
      volatile uint32_t  wr_id;                              /*! write slot index   */
      volatile uint32_t  rd_id;                              /*! read slot index    */
-     volatile bool_t    valid[CONFIG_REMOTE_FIFO_SLOTS];    /*! empty slot if false*/
-     cacheline_t        data[CONFIG_REMOTE_FIFO_SLOTS];     /*! fifo slot content  */
+     volatile uint32_t  valid[CONFIG_REMOTE_FIFO_SLOTS];    /*! empty slot if 0    */
+     uint64_t           data[CONFIG_REMOTE_FIFO_SLOTS];     /*! fifo slot content  */
  }
  remote_fifo_t;
  …
  ***********************************************************************************/
  error_t local_fifo_get_item( remote_fifo_t * fifo,
-                              void          * item,
-                              uint32_t        size );
+                              uint64_t      * item );

  /************************************************************************************
  …
  * @ fifo    : extended pointer to the fifo in remote cluster.
  * @ item    : pointer on a local buffer containing the item to be stored.
- * @ size    : actual number of bytes in one item.
- * @ first   : return value (true if first item registered in remote fifo)
+ * @ first   : [out] true if first item registered in remote fifo.
  * @ return 0 on success / EBUSY if a contention has been detected.
  ***********************************************************************************/
  error_t remote_fifo_put_item( xptr_t     fifo,
-                               void     * item,
-                               uint32_t   size,
+                               uint64_t * item,
                                bool_t   * first );
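The ticket mechanism described in the header comment can be sketched in a few lines: a writer reserves a slot by atomically incrementing wr_id, stores its 64-bit item, then raises the slot's valid flag; the single reader consumes slots in rd_id order and clears the flag. The self-contained C11 sketch below models this for ordinary shared memory only; the kernel version goes through the hal_remote_* primitives, disables IRQs around the put, and returns EBUSY after a bounded number of retries instead of spinning.

    /* Ticket-based single-reader / multiple-writers FIFO, shared-memory sketch. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    #define SLOTS 8   /* stand-in for CONFIG_REMOTE_FIFO_SLOTS */

    typedef struct
    {
        _Atomic uint32_t wr_id;          /* write ticket, shared by all writers     */
        uint32_t         rd_id;          /* read index, owned by the single reader  */
        _Atomic uint32_t valid[SLOTS];   /* slot holds an unread item when non-zero */
        uint64_t         data[SLOTS];    /* one 64-bit payload per slot             */
    } sketch_fifo_t;

    /* writer : reserve a slot with an atomic ticket, wait for it to be free, store
     * the payload, then publish it by raising the valid flag. The kernel version
     * bounds the wait and returns EBUSY so the caller can deschedule and retry;
     * the scheme assumes more slots than simultaneously active writers. */
    static void sketch_put( sketch_fifo_t * f, uint64_t item )
    {
        uint32_t ticket = atomic_fetch_add( &f->wr_id, 1 );
        uint32_t slot   = ticket % SLOTS;

        while( atomic_load( &f->valid[slot] ) )   /* previous item not yet consumed */
            ;

        f->data[slot] = item;
        atomic_store( &f->valid[slot], 1 );       /* publish only after the payload */
    }

    /* reader : consume slots in ticket order; returns false when the FIFO is empty */
    static bool sketch_get( sketch_fifo_t * f, uint64_t * item )
    {
        uint32_t slot = f->rd_id % SLOTS;

        if( atomic_load( &f->valid[slot] ) == 0 ) return false;

        *item = f->data[slot];
        atomic_store( &f->valid[slot], 0 );       /* free the slot for future writers */
        f->rd_id++;
        return true;
    }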
trunk/kernel/mm/page.h
(r23 → r68)

  {
      uint32_t          flags;    /*! flags defined above                  (4) */
-     uint32_t          order;    /*! log2( number of 4Kbytes pages)       (4) */
+     uint32_t          order;    /*! log2( number of small pages)         (4) */
      struct mapper_s * mapper;   /*! local pointer on associated mapper   (4) */
      uint32_t          index;    /*! page index in mapper                 (4) */
trunk/kernel/mm/vmm.c
(r50 → r68)

- //////////////////////////////////////////
- error_t vmm_map_vseg( vseg_t   * vseg,
-                       uint32_t   attr )
+ //////////////////////////////////////////////
+ error_t vmm_map_kernel_vseg( vseg_t   * vseg,
+                              uint32_t   attr )
  {
      vpn_t   vpn;   // VPN of PTE to be set
  …
      error_t error;

-     // check vseg type
+     // check vseg type : must be a kernel vseg
      uint32_t type = vseg->type;
-     if( (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
-     {
-         printk("\n[PANIC] in %s : not a kernel vseg\n", __FUNCTION__ );
-         hal_core_sleep();
-     }
+     assert( ((type==VSEG_TYPE_KCODE) || (type==VSEG_TYPE_KDATA) || (type==VSEG_TYPE_KDEV)),
+             __FUNCTION__ , "not a kernel vseg\n" );

      // get pointer on page table
  …
      for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
      {
+         // allocate a physical page from local PPM
          kmem_req_t req;
          req.type = KMEM_PAGE;
  …

      // this function must be called by a thread running in the reference cluster
-     if( GET_CXY( process->ref_xp ) != local_cxy );
-     {
-         printk("\n[PANIC] in %s : not called in the reference cluster\n", __FUNCTION__ );
-         hal_core_sleep();
-     }
+     assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ ,
+             " not called in the reference cluster\n" );

      // get VMM pointer
  …
  }  // end vmm_v2p_translate()

- //////////////////////////////////////////////
trunk/kernel/mm/vmm.h
(r23 → r68)

  * This function allocates physical memory from the local cluster to map all PTEs
  * of a "kernel" vseg (type KCODE , KDATA, or KDEV) in the page table of process_zero.
- * It should not be used for other vseg types, because "user" vsegs use the
+ * WARNING : It should not be used for "user" vsegs, that must be mapped using the
  * "on-demand-paging" policy.
  *********************************************************************************************
  …
  * @ returns 0 if success / returns ENOMEM if no memory
  *********************************************************************************************/
- error_t vmm_map_vseg( vseg_t   * vseg,
-                       uint32_t   attr );
+ error_t vmm_map_kernel_vseg( vseg_t   * vseg,
+                              uint32_t   attr );

  /*********************************************************************************************
  …
  /*********************************************************************************************
  * This function makes the virtual to physical address translation, using the calling
- * process page table. It uses identity mapping if required by the ident flag.
+ * process page table. It uses identity mapping if required by the <ident> argument.
  * This address translation is required to configure the peripherals having a DMA
  * capability, or to implement the software L2/L3 cache coherence, using the MMC device
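The comment above describes an eager mapping policy for kernel vsegs, in contrast to the on-demand paging of user vsegs: every page of the segment gets a physical frame from the local cluster and a PTE installed at once. The standalone sketch below illustrates that loop; the helper names (alloc_local_page, set_pte) and the page-size constant are assumptions, not the kernel's real API.

    /* Eager mapping of a kernel virtual range : one local physical page + one PTE per VPN. */
    #include <stdint.h>

    #define PAGE_SHIFT 12                /* 4 Kbytes small pages */

    typedef uint32_t vpn_t;
    typedef uint32_t ppn_t;

    /* hypothetical stubs standing in for the local PPM allocator and the GPT update */
    static ppn_t next_ppn = 1;
    static ppn_t alloc_local_page( void ) { return next_ppn++; }
    static int   set_pte( vpn_t vpn, ppn_t ppn, uint32_t attr )
    { (void)vpn; (void)ppn; (void)attr; return 0; }

    static int map_kernel_range( intptr_t base, intptr_t size, uint32_t attr )
    {
        vpn_t vpn_min = (vpn_t)( base >> PAGE_SHIFT );
        vpn_t vpn_max = (vpn_t)( (base + size - 1) >> PAGE_SHIFT );

        for( vpn_t vpn = vpn_min ; vpn <= vpn_max ; vpn++ )
        {
            ppn_t ppn = alloc_local_page();              /* physical page from local cluster */
            if( ppn == 0 )                  return -1;   /* ENOMEM in the real kernel        */
            if( set_pte( vpn, ppn, attr ) ) return -1;
        }
        return 0;
    }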
trunk/kernel/vfs/fatfs.c
(r53 → r68)

      assert( (error == 0) , __FUNCTION__ , "cannot access boot record" );

- #if CONFIG_FAT_DEBUG
+ #if CONFIG_FATFS_DEBUG
      uint32_t line;
      uint32_t byte = 0;