Changeset 14
- Timestamp:
- May 3, 2017, 1:23:24 PM (8 years ago)
- Location:
- trunk/kernel
- Files:
-
- 2 added
- 66 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/devices/dev_dma.h
r3 r14 25 25 #define _DEV_DMA_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <spinlock.h> -
trunk/kernel/devices/dev_fbf.c
r3 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_gpt.h> -
trunk/kernel/devices/dev_icu.c
r3 r14 82 82 if( irq_type == HWI_TYPE ) 83 83 { 84 assert( (irq_index < icu->ext.icu.hwi_nr) , __FUNCTION__ , "illegal HWI" ); 85 } 86 if( irq_type == WTI_TYPE ) 87 { 88 assert( (irq_index < icu->ext.icu.wti_nr) , __FUNCTION__ , "illegal WTI" ); 89 } 90 if( irq_type == PTI_TYPE ) 91 { 92 assert( (irq_index < icu->ext.icu.pti_nr) , __FUNCTION__ , "illegal PTI" ); 84 if( irq_index >= icu->ext.icu.hwi_nr ) 85 { 86 printk("\n[PANIC] in %s : illegal HWI index = %d / max = %d\n", 87 __FUNCTION__ , irq_index , icu->ext.icu.hwi_nr ); 88 hal_core_sleep(); 89 } 90 } 91 else if( irq_type == WTI_TYPE ) 92 { 93 if( irq_index >= icu->ext.icu.wti_nr ) 94 { 95 printk("\n[PANIC] in %s : illegal WTI index = %d / max = %d\n", 96 __FUNCTION__ , irq_index , icu->ext.icu.wti_nr ); 97 hal_core_sleep(); 98 } 99 } 100 else // irq_type == PTI_TYPE 101 { 102 if( irq_index >= icu->ext.icu.pti_nr ) 103 { 104 printk("\n[PANIC] in %s : illegal PTI index = %d / max = %d\n", 105 __FUNCTION__ , irq_index , icu->ext.icu.pti_nr ); 106 hal_core_sleep(); 107 } 93 108 } 94 109 } // end dev_icu_check_irq() … … 107 122 dev_icu_check_irq( icu , irq_type , irq_index ); 108 123 109 // (1)call implementation specific ICU driver to enable IRQ124 // call implementation specific ICU driver to enable IRQ 110 125 if( icu->impl == IMPL_ICU_XCU ) 111 126 { 112 soclib_xcu_enable_irq( icu , 1<<irq_index , irq_type , lid ); 113 } 114 115 // (2) get selected core local pointer, and register 116 // source chdev pointer in relevant interrupt vector 117 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; 118 core_set_irq_vector_entry( core , irq_type , irq_index , src_chdev ); 119 120 // (3) register IRQ type and index in source chdev descriptor 121 src_chdev->irq_type = irq_type; 122 src_chdev->irq_id = irq_index; 123 127 soclib_xcu_enable_irq( icu , 1<<irq_index , irq_type , lid ); 128 } 129 130 // This is only done for an HWI, or for a WTI that is not an IPI 131 if( (irq_type != PTI_TYPE) && (src_chdev != NULL) ) 132 { 133 
// get selected core local pointer, and register 134 // source chdev pointer in relevant interrupt vector 135 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; 136 core_set_irq_vector_entry( core , irq_type , irq_index , src_chdev ); 137 138 // (3) register IRQ type and index in source chdev descriptor 139 src_chdev->irq_type = irq_type; 140 src_chdev->irq_id = irq_index; 141 } 124 142 } // end dev_icu_enable_irq() 125 143 … … 136 154 dev_icu_check_irq( icu , irq_type , irq_index ); 137 155 138 // (1)call the implementation specific ICU driver to disable IRQ156 // call the implementation specific ICU driver to disable IRQ 139 157 if( icu->impl == IMPL_ICU_XCU ) 140 158 { … … 142 160 } 143 161 144 // (2) get selected remote core local pointer, and remove 145 // the source chdev xptr from relevant interrupt vector 146 147 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; 148 core_set_irq_vector_entry( core , irq_type , irq_index , NULL ); 149 162 // This is only done for HWI or WTI that are not IPI 163 if( irq_type != PTI_TYPE ) 164 { 165 // get selected remote core local pointer, and remove 166 // the source chdev xptr from relevant interrupt vector 167 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; 168 core_set_irq_vector_entry( core , irq_type , irq_index , NULL ); 169 } 150 170 } // end dev_icu_disable_irq() 171 172 /////////////////////////////////////// 173 void dev_icu_get_masks( lid_t lid, 174 uint32_t * hwi_mask, 175 uint32_t * wti_mask, 176 uint32_t * pti_mask ) 177 { 178 // get local pointer on local ICU chdev 179 xptr_t icu_xp = chdev_dir.icu[local_cxy]; 180 chdev_t * icu = (chdev_t *)GET_PTR( icu_xp ); 181 182 if( icu->impl == IMPL_ICU_XCU ) 183 { 184 soclib_xcu_get_masks( icu , lid , hwi_mask , wti_mask , pti_mask ); 185 } 186 } 151 187 152 188 ////////////////////////////////////////////// … … 312 348 dev_icu_ack_timer( index ); 313 349 314 // TODO execute all actions related to TICK event 315 core_clock( core ); 350 if( index < LOCAL_CLUSTER->cores_nr ) // 
its a TICK event 351 { 352 // TODO execute all actions related to TICK event 353 core_clock( core ); 354 } 355 else 356 { 357 printk("\n[WARNING] in %s : no handler for PTI %d on core %d in cluster %x\n", 358 __FUNCTION__ , index , core->lid , local_cxy ); 359 core->spurious_irqs ++; 360 dev_icu_disable_irq( core->lid , PTI_TYPE , index ); 361 } 316 362 } 317 363 } // end dev_icu_irq_handler() … … 340 386 // release lock 341 387 spinlock_unlock( lock ); 342 388 343 389 return index; 344 390 } // end dev_icu_wti_alloc() -
trunk/kernel/devices/dev_icu.h
r3 r14 25 25 #define _DEV_ICU_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <spinlock.h> … 179 179 uint32_t irq_type, 180 180 uint32_t irq_id ); 181 181 182 /***************************************************************************************** 183 * This function returns the set of enabled IRQs for a given core. 184 ***************************************************************************************** 185 * @ lid : local index of selected core in remote cluster. 186 * @ hwi_mask : each non zero bit define an enabled HWI IRQ. 187 * @ wti_mask : each non zero bit define an enabled WTI IRQ. 188 * @ pti_mask : each non zero bit define an enabled PTI IRQ. 189 ****************************************************************************************/ 190 void dev_icu_get_masks( lid_t lid, 191 uint32_t * hwi_mask, 192 uint32_t * wti_mask, 193 uint32_t * pti_mask ); 194 182 195 /***************************************************************************************** 183 196 * This function set the period value for a timer identified by the PTI index, -
trunk/kernel/devices/dev_iob.c
r3 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_special.h> -
trunk/kernel/devices/dev_iob.h
r3 r14 25 25 #define _DEV_IOB_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <spinlock.h> -
trunk/kernel/devices/dev_ioc.c
r3 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_gpt.h> -
trunk/kernel/devices/dev_ioc.h
r3 r14 25 25 #define _DEV_IOC_H 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/devices/dev_mmc.c
r3 r14 55 55 assert( false , __FUNCTION__ , "undefined MMC device implementation" ); 56 56 } 57 58 // get MMC HWI IRQ index 59 uint32_t hwi_id = chdev_icu_input.mmc; 60 61 // enable HWI IRQ to CP0 in local ICU, and update interrupt vector 62 dev_icu_enable_irq( 0 , HWI_TYPE , hwi_id , chdev ); 63 57 64 } // end dev_mmc_init() 58 65 -
trunk/kernel/devices/dev_mmc.h
r3 r14 25 25 #define _DEV_MMC_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <spinlock.h> -
trunk/kernel/devices/dev_nic.h
r3 r14 25 25 #define _DEV_NIC_H 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/devices/dev_pic.h
r3 r14 25 25 #define _DEV_PIC_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/devices/dev_txt.c
r3 r14 158 158 uint32_t count ) 159 159 { 160 uint32_t save_sr; 161 160 162 // get pointer on calling thread 161 163 thread_t * this = CURRENT_THREAD; … … 175 177 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) ); 176 178 177 // call directly driver command after taking chdev lock 178 remote_spinlock_lock( XPTR( dev_cxy , &dev_ptr->wait_lock ) ); 179 // call directly driver command 179 180 cmd( XPTR( local_cxy , this ) ); 180 remote_spinlock_unlock( XPTR( dev_cxy , &dev_ptr->wait_lock ) );181 181 182 182 // return I/O operation status from calling thread descriptor -
trunk/kernel/devices/dev_txt.h
r3 r14 25 25 #define _DEV_TXT_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 … 80 80 /****************************************************************************************** 81 81 * This function completes the TXT chdev descriptor initialisation, 82 * namely the link with the implementation specific driver.82 * namely the link with the implementation specific driver. 83 83 * The func, impl, channel, is_rxt, base fields have been previously initialised. 84 84 * It calls the specific driver initialisation function, to initialise the hardware -
trunk/kernel/drivers/soclib/soclib_iob.c
r4 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_remote.h> -
trunk/kernel/drivers/soclib/soclib_xcu.c
r4 r14 36 36 uint32_t * base = (uint32_t *)GET_PTR( icu->base ); 37 37 38 // write into registers38 // disable all IRQs 39 39 base[XCU_MSK_HWI_DISABLE << 5 | lid] = 0xFFFFFFFF; 40 40 base[XCU_MSK_WTI_DISABLE << 5 | lid] = 0xFFFFFFFF; … … 70 70 else if( type == HWI_TYPE ) base[XCU_MSK_HWI_ENABLE << 5 | lid] = mask; 71 71 else base[XCU_MSK_PTI_ENABLE << 5 | lid] = mask; 72 } 73 74 /////////////////////////////////////////// 75 void soclib_xcu_get_masks( chdev_t * icu, 76 lid_t lid, 77 uint32_t * hwi_mask, 78 uint32_t * wti_mask, 79 uint32_t * pti_mask ) 80 { 81 // get XCU segment base address 82 uint32_t * base = (uint32_t *)GET_PTR( icu->base ); 83 84 // get values from registers 85 *hwi_mask = base[XCU_MSK_HWI << 5 | lid]; 86 *wti_mask = base[XCU_MSK_WTI << 5 | lid]; 87 *pti_mask = base[XCU_MSK_PTI << 5 | lid]; 72 88 } 73 89 … … 108 124 uint32_t prio = base[XCU_PRIO << 5 | lid]; 109 125 110 if( prio & 0x4 ) *wti_status = ((prio >> 24) & 0x1F) + 1;111 if( prio & 0x2 ) *hwi_status = ((prio >> 16) & 0x1F) + 1;112 if( prio & 0x1 ) *pti_status = ((prio >> 8) & 0x1F) + 1;126 *wti_status = (prio & 0x4) ? (((prio >> 24) & 0x1F) + 1) : 0; 127 *hwi_status = (prio & 0x2) ? (((prio >> 16) & 0x1F) + 1) : 0; 128 *pti_status = (prio & 0x1) ? (((prio >> 8) & 0x1F) + 1) : 0; 113 129 } 114 130 -
trunk/kernel/drivers/soclib/soclib_xcu.h
r4 r14 94 94 95 95 /****************************************************************************************** 96 * This function returns the values contained in the HWI/WTI/PTI mask registers for 97 * a given core. It must be called by a local thread. 98 ****************************************************************************************** 99 * @ icu : pointer on local XCU chdev descriptor 100 * @ lid : local core index == output IRQ index 101 * @ hwi_mask : [out] HWI mask for selected core 102 * @ wti_mask : [out] WTI mask for selected core 103 * @ pti_mask : [out] PTI mask for selected core 104 *****************************************************************************************/ 105 void soclib_xcu_get_masks( chdev_t * icu, 106 lid_t lid, 107 uint32_t * hwi_mask, 108 uint32_t * wti_mask, 109 uint32_t * pti_mask ); 110 111 /****************************************************************************************** 96 112 * This function set the period value for a local XCU timer. 97 113 ****************************************************************************************** -
trunk/kernel/kern/chdev.c
r5 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_special.h> -
trunk/kernel/kern/chdev.h
r5 r14 25 25 #define _CHDEV_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <xlist.h> -
trunk/kernel/kern/cluster.c
r5 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_atomic.h> … … 35 35 #include <list.h> 36 36 #include <cluster.h> 37 #include <sysfs.h>38 37 #include <boot_info.h> 39 38 #include <bits.h> … … 43 42 #include <process.h> 44 43 #include <dqdt.h> 44 45 // TODO #include <sysfs.h> 45 46 46 47 /////////////////////////////////////////////////////////////////////////////////////////// … … 134 135 hal_wbflush(); 135 136 136 // wait all clusters initialised on barrier located in cluster_io137 remote_barrier( XPTR( cluster->io_cxy , &cluster->barrier ) ,138 cluster->x_size * cluster->y_size );139 140 137 return 0; 141 138 } // end cluster_init() -
trunk/kernel/kern/cluster.h
r5 r14 27 27 #define _CLUSTER_H_ 28 28 29 #include < almos_config.h>29 #include <kernel_config.h> 30 30 #include <hal_types.h> 31 31 #include <bits.h> … … 97 97 { 98 98 spinlock_t kcm_lock; /*! local, protect creation of KCM allocators */ 99 remote_barrier_t barrier; /*! used to synchronize kernel parallel init */100 99 101 100 // global parameters … … 137 136 138 137 char name[CONFIG_SYSFS_NAME_LEN]; 139 sysfs_entry_t node; 138 139 // sysfs_entry_t node; 140 140 } 141 141 cluster_t; -
trunk/kernel/kern/core.c
r5 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_special.h> … … 35 35 #include <cluster.h> 36 36 #include <kmem.h> 37 #include <sysfs.h>38 37 #include <dqdt.h> 39 38 #include <core.h> 40 39 40 // TODO #include <sysfs.h> 41 41 42 42 ///////////////////////////////// -
trunk/kernel/kern/core.h
r5 r14 27 27 #define _CORE_H_ 28 28 29 #include < almos_config.h>29 #include <kernel_config.h> 30 30 #include <hal_types.h> 31 31 #include <list.h> 32 32 #include <rpc.h> 33 33 #include <scheduler.h> 34 #include <sysfs.h>35 34 36 35 /**** Forward declarations ****/ … … 42 41 /**************************************************************************************** 43 42 * This structure defines the core descriptor. 44 * It contains the three interrupt vectors, that are implemented as array of pointers 45 * on the source channel devices, for all IRQs allocated to a given core. 43 * - It contains an embedded private scheduler. 44 * - It contains the three interrupt vectors, implemented as three arrays of pointers 45 * on the source channel devices, for all IRQs allocated to the core. 46 46 ***************************************************************************************/ 47 47 … … 63 63 list_entry_t rpc_free_list; /*! root of the list of free RPC threads */ 64 64 rpc_fifo_t rpc_fifo; /*! embedded private RPC fifo (one per core) */ 65 65 66 scheduler_t scheduler; /*! embedded private scheduler */ 66 67 … … 69 70 struct chdev_s * wti_vector[CONFIG_MAX_WTIS_PER_ICU]; /*! on source device */ 70 71 71 sysfs_entry_t node;72 // sysfs_entry_t node; 72 73 } 73 74 core_t; -
trunk/kernel/kern/dqdt.c
r5 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_special.h> -
trunk/kernel/kern/dqdt.h
r1 r14 25 25 #define _DQDT_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <hal_atomic.h> -
trunk/kernel/kern/kernel_init.c
r5 r14 3 3 * 4 4 * Authors : Alain Greiner (2016) 5 * Mohamed Lamine Karaoui (2016) 5 6 * 6 7 * Copyright (c) Sorbonne Universites … … 22 23 */ 23 24 24 #include < almos_config.h>25 #include <kernel_config.h> 25 26 #include <errno.h> 26 27 #include <hal_types.h> 27 28 #include <hal_special.h> 28 29 #include <hal_context.h> 30 #include <barrier.h> 29 31 #include <remote_barrier.h> 30 32 #include <core.h> … … 34 36 #include <kmem.h> 35 37 #include <cluster.h> 36 #include <devfs.h>37 #include <sysfs.h>38 38 #include <string.h> 39 39 #include <memcpy.h> … … 54 54 #include <soclib_tty.h> 55 55 56 // TODO #include <devfs.h> 57 // TODO #include <sysfs.h> 56 58 57 59 #define KERNEL_INIT_SYNCHRO 0xA5A5B5B5 … … 60 62 // All these global variables are replicated in all clusters. 61 63 // They are initialised by the kernel_init() function. 64 // 65 // WARNING : The section names have been defined to control the base addresses of the 66 // boot_info structure and the idle thread descriptors, through the kernel.ld script: 67 // - the boot_info structure is build by the bootloader, and used by kernel_init. 68 // it must be first object in the kdata segment. 69 // - the array of idle threads descriptors must be placed on the first page boundary after 70 // the boot_info structure in the kdata segment. 
62 71 /////////////////////////////////////////////////////////////////////////////////////////// 63 72 64 73 // This variable defines the local boot_info structure 65 74 __attribute__((section(".kinfo"))) 66 boot_info_t boot_info CACHELINE_ALIGNED; 75 boot_info_t boot_info; 76 77 // This variable defines the "idle" threads descriptors array 78 __attribute__((section(".kidle"))) 79 char idle_threads[CONFIG_THREAD_DESC_SIZE * 80 CONFIG_MAX_LOCAL_CORES] CONFIG_PPM_PAGE_ALIGNED; 67 81 68 82 // This variable defines the local cluster manager 69 83 __attribute__((section(".kdata"))) 70 cluster_t cluster_manager CACHELINE_ALIGNED;71 72 // Th ese variables define the kernel process0 descriptor and associated thread84 cluster_t cluster_manager CONFIG_CACHE_LINE_ALIGNED; 85 86 // This variables define the kernel process0 descriptor 73 87 __attribute__((section(".kdata"))) 74 process_t process_zero CACHELINE_ALIGNED; 75 thread_t thread_zero CACHELINE_ALIGNED; 76 77 // This variable contains the extended pointers on the device descriptors 88 process_t process_zero CONFIG_CACHE_LINE_ALIGNED; 89 90 // This variable defines extended pointers on the distributed chdevs 78 91 __attribute__((section(".kdata"))) 79 chdev_directory_t chdev_dir CACHELINE_ALIGNED;92 chdev_directory_t chdev_dir CONFIG_CACHE_LINE_ALIGNED; 80 93 81 94 // This variable contains the input IRQ indexes for the PIC device 82 95 __attribute__((section(".kdata"))) 83 chdev_pic_input_t chdev_pic_input CACHELINE_ALIGNED;96 chdev_pic_input_t chdev_pic_input CONFIG_CACHE_LINE_ALIGNED; 84 97 85 98 // This variable contains the input IRQ indexes for the ICU device 86 99 __attribute__((section(".kdata"))) 87 chdev_icu_input_t chdev_icu_input CACHELINE_ALIGNED; 88 89 // This variable synchronizes the local cores during kernel_init() 90 __attribute__((section(".kdata"))) 91 volatile uint32_t local_sync_init CACHELINE_ALIGNED; 100 chdev_icu_input_t chdev_icu_input CONFIG_CACHE_LINE_ALIGNED; 92 101 93 102 // This variable 
defines the local cluster identifier 94 103 __attribute__((section(".kdata"))) 95 cxy_t local_cxy CACHELINE_ALIGNED;96 97 // This variable is the lock protecting the kernel TXT terminal (used by printk)104 cxy_t local_cxy CONFIG_CACHE_LINE_ALIGNED; 105 106 // This variable defines the TXT0 chdev descriptor 98 107 __attribute__((section(".kdata"))) 99 remote_spinlock_t txt0_lock CACHELINE_ALIGNED; 108 chdev_t txt0_chdev CONFIG_CACHE_LINE_ALIGNED; 109 110 // This variable is used for CP0 cores sychronisation in kernel_init() 111 __attribute__((section(".kdata"))) 112 remote_barrier_t global_barrier CONFIG_CACHE_LINE_ALIGNED; 113 114 // This variable is used for local cores sychronisation in kernel_init() 115 __attribute__((section(".kdata"))) 116 barrier_t local_barrier CONFIG_CACHE_LINE_ALIGNED; 100 117 101 118 /////////////////////////////////////////////////////////////////////////////////////////// … … 119 136 120 137 /////////////////////////////////////////////////////////////////////////////////////////// 121 // This static function allocates memory and initializes the TXT0 chdev descriptor, 122 // associated to the kernel terminal, shared by all kernel instances for debug messages. 123 // It should be called by a thread running in the I/O cluster, because the TXT0 chdev 124 // is created in the I/O cluster. 138 // This static function initializes the TXT0 chdev descriptor, associated to the "kernel 139 // terminal", and shared by all kernel instances for debug messages. It also register it 140 // in the chdev directory, containing extended pointers on all chdevs. 141 // The global variable txt0_chdev is replicated in all clusters, but only the chdev 142 // allocated in I/O cluster is used by ALMOS-MKH. 143 // Therefore, this function must be called by a thread running in the I/O cluster. 
144 // As this TXT0 chdev supports only the TXT_SYNC_WRITE command, we don't create 145 // a server thread, we don't allocate a WTI, and we don't initialize the waiting queue. 125 146 /////////////////////////////////////////////////////////////////////////////////////////// 126 147 // @ info : pointer on the local boot-info structure. … … 131 152 uint32_t dev_nr; // actual number of devices in this cluster 132 153 xptr_t base; // remote pointer on segment base 133 uint32_t size; // channel size (bytes)134 154 uint32_t type; // peripheral type 135 155 uint32_t func; // device functionnal index … … 138 158 uint32_t x; // X cluster coordinate 139 159 uint32_t y; // Y cluster coordinate 140 chdev_t * chdev; // local pointer on created chdev141 160 142 161 // get number of peripherals and base of devices array from boot_info … … 144 163 dev_tbl = info->ext_dev; 145 164 146 // loop on external peripherals to find TXT 165 // loop on external peripherals to find TXT device 147 166 for( i = 0 ; i < dev_nr ; i++ ) 148 167 { 149 size = dev_tbl[i].size;150 168 base = dev_tbl[i].base; 151 169 type = dev_tbl[i].type; … … 155 173 if (func == DEV_FUNC_TXT ) 156 174 { 157 // allocate and initialize a local chdev for TXT0 158 chdev = chdev_create( func, 159 impl, 160 0, // channel 161 0, // direction 162 base ); 175 // initialize basic fields 176 txt0_chdev.func = func; 177 txt0_chdev.impl = impl; 178 txt0_chdev.channel = 0; 179 txt0_chdev.is_rx = 0; 180 txt0_chdev.base = base; 181 182 // initialize lock 183 remote_spinlock_init( XPTR( local_cxy , &txt0_chdev.wait_lock ) ); 163 184 164 185 // Complete TXT specific initialisation 165 186 if( impl == IMPL_TXT_TTY ) 166 187 { 167 chdev->cmd = &soclib_tty_cmd;168 chdev->isr = &soclib_tty_isr;169 soclib_tty_init( chdev );188 txt0_chdev.cmd = &soclib_tty_cmd; 189 txt0_chdev.isr = &soclib_tty_isr; 190 soclib_tty_init( &txt0_chdev ); 170 191 } 171 192 … … 176 197 { 177 198 cxy_t cxy = (x<<info->y_width) + y; 178 hal_remote_swd( XPTR( cxy , 
&chdev_dir.txt[0] ) , XPTR( local_cxy , chdev ) ); 199 hal_remote_swd( XPTR( cxy , &chdev_dir.txt[0] ) , 200 XPTR( local_cxy , &txt0_chdev ) ); 179 201 } 180 202 } 181 203 182 kinit_dmsg("\n[INFO] %s : core[%x][0] created TXT0 chdev / paddr = %l at cycle %d\n", 183 __FUNCTION__ , local_cxy , chdev_func_str( func ), chdev_xp , hal_time_stamp() ); 204 kinit_dmsg("\n[INFO] %s : core[%x][0] created TXT0 chdev" 205 " / paddr = %l at cycle %d\n", 206 __FUNCTION__ , local_cxy , chdev_func_str( func ), 207 XPTR(local_cxy , &txt0_chdev) , hal_time_stamp() ); 184 208 } 185 209 … … 200 224 static void internal_devices_init( boot_info_t * info ) 201 225 { 202 boot_device_t * dev_tbl; // pointer on array of devices in boot_info 203 uint32_t dev_nr; // actual number of devices in this cluster 204 xptr_t base; // remote pointer on segment base 205 uint32_t size; // channel size (bytes) 206 uint32_t type; // peripheral type 207 uint32_t func; // device functionnal index 208 uint32_t impl; // device implementation index 209 uint32_t i; // device index in dev_tbl 226 boot_device_t * dev; // pointer on boot_info device (ICU/MMC/DMA) 210 227 uint32_t x; // X cluster coordinate 211 228 uint32_t y; // Y cluster coordinate 212 uint32_t channels_nr; // number of channels in device 213 uint32_t channel; // channel index 214 uint32_t p0; // device parameter 0 215 uint32_t p1; // device parameter 1 216 uint32_t p2; // device parameter 2 217 uint32_t p3; // device parameter 3 218 219 chdev_t * chdev; // local pointer on one channel_device descriptor 220 xptr_t chdev_xp; // extended pointer on channel_device descriptor 221 222 // get number of internal devices and base of devices array from boot_info 223 dev_nr = info->int_dev_nr; 224 dev_tbl = info->int_dev; 225 226 // loop on all internal devices in cluster 227 for( i = 0 ; i < dev_nr ; i++ ) 228 { 229 size = dev_tbl[i].size; 230 base = dev_tbl[i].base; 231 type = dev_tbl[i].type; 232 channels_nr = dev_tbl[i].channels; 233 p0 = 
dev_tbl[i].param0; 234 p1 = dev_tbl[i].param1; 235 p2 = dev_tbl[i].param2; 236 p3 = dev_tbl[i].param3; 237 238 func = FUNC_FROM_TYPE( type ); 239 impl = IMPL_FROM_TYPE( type ); 240 241 // do nothing for RAM, that does not require a chdev descriptor. 242 if( func == DEV_FUNC_RAM ) continue; 243 244 // check internal device functional type 245 if( (func != DEV_FUNC_MMC) && 246 (func != DEV_FUNC_ICU) && 247 (func != DEV_FUNC_DMA) ) 248 { 249 assert( false , __FUNCTION__ , "illegal internal peripheral type" ); 250 } 251 252 // loop on channels 253 for( channel = 0 ; channel < channels_nr ; channel++ ) 254 { 255 // create one chdev in local cluster 256 chdev = chdev_create( func , 257 impl, 258 channel, 259 false, // TX 260 base ); 261 262 assert( (chdev != NULL) , __FUNCTION__ , "cannot allocate internal chdev" ); 229 chdev_t * chdev_ptr; // local pointer on chdev descriptor 230 xptr_t chdev_xp; // extended pointer on chdev descriptor 231 232 /////////// ICU ////////// 233 234 dev = &info->dev_icu; 235 236 assert( ((info->cores_nr == 0) || (dev->channels != 0)) , __FUNCTION__ , 237 "ICU device must exist in cluster containing cores" ); 238 239 assert( (dev->channels == 1) , __FUNCTION__ , 240 "channels number must be 1 for ICU device" ); 241 242 assert( (FUNC_FROM_TYPE( dev->type ) == DEV_FUNC_ICU ) , __FUNCTION__ , 243 " inconsistent ICU device type"); 244 245 // create one chdev in local cluster 246 chdev_ptr = chdev_create( FUNC_FROM_TYPE( dev->type ), 247 IMPL_FROM_TYPE( dev->type ), 248 0, // channel 249 false, // TX 250 dev->base ); 251 252 assert( (chdev_ptr != NULL) , __FUNCTION__ , "cannot allocate ICU chdev" ); 253 254 // get extended pointer on chdev descriptor 255 chdev_xp = XPTR( local_cxy , chdev_ptr ); 256 257 // make ICU specific initialisation 258 // TODO remove these three parameters 259 dev_icu_init( chdev_ptr , dev->param0 , dev->param1 , dev->param2 ); 260 261 // initialize the ICU field in the chdev_dir[x][y] structures 262 // replicated in all 
clusters, and containing extended pointers 263 // on all remotely accessible devices 264 for( x = 0 ; x < info->x_size ; x++ ) 265 { 266 for( y = 0 ; y < info->y_size ; y++ ) 267 { 268 cxy_t cxy = (x<<info->y_width) + y; 269 hal_remote_swd( XPTR( cxy , &chdev_dir.icu[local_cxy] ) , chdev_xp ); 270 } 271 } 272 273 // initialize the entries of the local chdev_icu_input structure 274 // defining how internal peripherals are connected to ICU 275 uint32_t id; 276 uint8_t valid; 277 uint32_t src_type; 278 uint8_t src_ch; 279 uint32_t src_func; 280 for( id = 0 ; id < CONFIG_MAX_HWIS_PER_ICU ; id++ ) 281 { 282 valid = dev->irq[id].valid; 283 src_type = dev->irq[id].dev_type; 284 src_ch = dev->irq[id].channel; 285 src_func = FUNC_FROM_TYPE( src_type ); 286 287 if( valid ) // only valid local IRQs are registered 288 { 289 if ( src_func == DEV_FUNC_MMC ) chdev_icu_input.mmc = id; 290 else if( src_func == DEV_FUNC_DMA ) chdev_icu_input.dma[src_ch] = id; 291 else assert( false , __FUNCTION__ , "illegal source device for ICU input" ); 292 } 293 } 294 295 kinit_dmsg("\n[INFO] %s : core[%x][0] creates ICU chdev at cycle %d\n", 296 __FUNCTION__ , local_cxy , hal_time_stamp() ); 297 298 /////////// MMC internal chdev /////////// 299 300 dev = &info->dev_mmc; 301 302 if( dev->channels != 0 ) // MMC device is defined 303 { 304 assert( (dev->channels == 1) , __FUNCTION__ , 305 "channels number must be 1 for MMC device" ); 306 307 assert( (FUNC_FROM_TYPE( dev->type ) == DEV_FUNC_MMC ) , __FUNCTION__ , 308 " inconsistent MMC device type"); 309 310 // create one chdev in local cluster 311 chdev_ptr = chdev_create( FUNC_FROM_TYPE( dev->type ), 312 IMPL_FROM_TYPE( dev->type ), 313 0, // channel 314 false, // TX 315 dev->base ); 316 317 assert( (chdev_ptr != NULL) , __FUNCTION__ , "cannot allocate MMC chdev" ); 318 319 // get extended pointer on chdev descriptor 320 chdev_xp = XPTR( local_cxy , chdev_ptr ); 321 322 // make MMC specific initialisation 323 dev_mmc_init( chdev_ptr ); 324 325 // 
initialize the MMC field in the chdev_dir[x][y] structures 326 // replicated in all clusters, and containing extended pointers 327 // on all remotely accessible devices 328 for( x = 0 ; x < info->x_size ; x++ ) 329 { 330 for( y = 0 ; y < info->y_size ; y++ ) 331 { 332 cxy_t cxy = (x<<info->y_width) + y; 333 hal_remote_swd( XPTR( cxy , &chdev_dir.mmc[local_cxy] ) , chdev_xp ); 334 } 335 } 336 337 kinit_dmsg("\n[INFO] %s : core[%x][0] creates MMC chdev at cycle %d\n", 338 __FUNCTION__ , local_cxy , hal_time_stamp() ); 339 } 340 341 /////////// DMA internal chdevs ////////// 342 343 dev = &info->dev_dma; 344 345 if( dev->channels != 0 ) // DMA device is defined 346 { 347 assert( (FUNC_FROM_TYPE( dev->type ) == DEV_FUNC_DMA ) , __FUNCTION__ , 348 " inconsistent DMA device type"); 349 350 // create one chdev per channel in local cluster 351 uint32_t channel; 352 for( channel = 0 ; channel < dev->channels ; channel++ ) 353 { 354 chdev_ptr = chdev_create( FUNC_FROM_TYPE( dev->type ), 355 IMPL_FROM_TYPE( dev->type ), 356 channel, // channel 357 false, // TX 358 dev->base ); 359 360 assert( (chdev_ptr != NULL) , __FUNCTION__ , "cannot allocate DMA chdev" ); 263 361 264 362 // get extended pointer on channel descriptor 265 chdev_xp = XPTR( local_cxy , chdev ); 266 267 // TODO ??? 
AG 268 // devfs_register( dev ); 269 270 // make device type specific initialisation 271 // the number of parameters depends on the device type 272 // TODO : remove these parameters that must be provided by the driver 273 if ( func == DEV_FUNC_ICU ) dev_icu_init( chdev , p0 , p1 , p2 ); 274 else if( func == DEV_FUNC_MMC ) dev_mmc_init( chdev ); 275 else dev_dma_init( chdev ); 276 277 // initialize the replicated chdev_dir[x][y] structures 278 // containing extended pointers on all devices descriptors 279 xptr_t * entry; 280 281 if ( func == DEV_FUNC_ICU ) entry = &chdev_dir.icu[local_cxy]; 282 else if( func == DEV_FUNC_MMC ) entry = &chdev_dir.mmc[local_cxy]; 283 else entry = &chdev_dir.dma[channel]; 284 285 if( func != DEV_FUNC_DMA ) // ICU and MMC devices are remotely accessible 286 { 287 for( x = 0 ; x < info->x_size ; x++ ) 288 { 289 for( y = 0 ; y < info->y_size ; y++ ) 290 { 291 cxy_t cxy = (x<<info->y_width) + y; 292 hal_remote_swd( XPTR( cxy , entry ) , chdev_xp ); 293 } 294 } 295 } 296 else // DMA devices are NOT remotely accessible 297 { 298 *entry = chdev_xp; 299 } 300 301 kinit_dmsg("\n[INFO] %s :core[%x][0] created chdev %s / channel %d" 302 " / paddr = %l at cycle %d\n", 303 __FUNCTION__ , local_cxy , chdev_func_str( func ) , 304 channel , chdev_xp , hal_time_stamp() ); 305 306 } // end loop on channels 307 308 // initialize the entries of the local chdev_icu_input structure 309 // defining how internal peripherals are connected to ICU 310 if( func == DEV_FUNC_ICU ) 311 { 312 uint32_t id; 313 uint8_t valid; 314 uint32_t dev_type; 315 uint8_t channel; 316 317 // loop on ICU inputs 318 for( id = 0 ; id < CONFIG_MAX_HWIS_PER_ICU ; id++ ) 319 { 320 valid = dev_tbl[i].irq[id].valid; 321 dev_type = dev_tbl[i].irq[id].dev_type; 322 channel = dev_tbl[i].irq[id].channel; 323 324 if( valid ) // only valid local IRQs are registered 325 { 326 uint32_t * index; // local pointer on the entry to be set 327 uint16_t dev_func = FUNC_FROM_TYPE( dev_type ); 328 if( 
dev_func == DEV_FUNC_MMC ) 329 index = &chdev_icu_input.mmc; 330 else if( dev_func == DEV_FUNC_DMA ) 331 index = &chdev_icu_input.dma[channel]; 332 else 333 { 334 assert( false , __FUNCTION__ , "illegal source device for ICU input" ); 335 } 336 337 // set entry in local structure 338 *index = id; 339 } 340 341 } // end loop on ICU inputs 342 } // end if ICU 343 } // end loop on peripherals 363 chdev_xp = XPTR( local_cxy , chdev_ptr ); 364 365 // make DMA specific initialisation 366 dev_dma_init( chdev_ptr ); 367 368 // initialize only the DMA[channel] field in the local chdev_dir[x][y] 369 // structure because the DMA device is not remotely accessible. 370 chdev_dir.dma[channel] = chdev_xp; 371 372 kinit_dmsg("\n[INFO] %s : core[%x][0] creates DMA[%d] chdev at cycle %d\n", 373 __FUNCTION__ , local_cxy , channel , hal_time_stamp() ); 374 } 375 } 344 376 } // end internal_devices_init() 345 377 … … 353 385 // 354 386 // The number of channel_devices depends on the device functionnal type. 355 // There is three nested loops to scanthe full set of external channel_devices:387 // There is three nested loops to build the full set of external channel_devices: 356 388 // - loop on external devices. 357 389 // - loop on channels for multi-channels devices. 
… … 371 403 uint32_t dev_nr; // actual number of devices in this cluster 372 404 xptr_t base; // remote pointer on segment base 373 uint32_t size; // channel size (bytes)374 405 uint32_t type; // peripheral type 375 406 uint32_t func; // device functionnal index … … 399 430 for( i = 0 ; i < dev_nr ; i++ ) 400 431 { 401 size = dev_tbl[i].size;402 432 base = dev_tbl[i].base; 403 433 type = dev_tbl[i].type; … … 496 526 } 497 527 498 kinit_dmsg("\n[INFO] %s : core[%x][0] created chdev %s / channel = %d" 499 " / paddr = %l at cycle %d\n", 528 kinit_dmsg("\n[INFO] %s : core[%x][0] create chdev %s[%d] at cycle %d\n", 500 529 __FUNCTION__ , local_cxy , chdev_func_str( func ), 501 channel , chdev_xp ,hal_time_stamp() );530 channel , hal_time_stamp() ); 502 531 503 532 } // end if match … … 564 593 565 594 /////////////////////////////////////////////////////////////////////////////////////////// 595 // This static function returns the identifiers of the calling core. 596 /////////////////////////////////////////////////////////////////////////////////////////// 597 // @ info : pointer on boot_info structure. 598 // @ lid : [out] core local index in cluster. 599 // @ cxy : [out] cluster identifier. 600 // @ lid : [out] core global identifier (hardware). 601 // @ return 0 if success / return EINVAL if not found. 
602 /////////////////////////////////////////////////////////////////////////////////////////// 603 static error_t core_get_identifiers( boot_info_t * info, 604 uint32_t * lid, 605 cxy_t * cxy, 606 gid_t * gid ) 607 { 608 uint32_t i; 609 gid_t global_id; 610 611 // get global identifier from hardware register 612 global_id = hal_get_gid(); 613 614 // makes an associative search in boot_info to get (cxy,lid) from global_id 615 for( i = 0 ; i < info->cores_nr ; i++ ) 616 { 617 if( global_id == info->core[i].gid ) 618 { 619 *lid = info->core[i].lid; 620 *cxy = info->core[i].cxy; 621 *gid = global_id; 622 return 0; 623 } 624 } 625 return EINVAL; 626 } 627 628 /////////////////////////////////////////////////////////////////////////////////////////// 566 629 // This function is the entry point for the kernel initialisation. 567 // It is executed by all cores in all clusters, but only core[0] in each cluster568 // initialize the cluster manager, ant the local peripherals.630 // It is executed by all cores in all clusters, but only core[0], called CP0, 631 // initializes the shared resources such as the cluster manager, or the local peripherals. 569 632 // To comply with the multi-kernels paradigm, it access only local cluster memory, using 570 633 // only informations contained in the local boot_info_t structure, set by the bootloader. 
… … 574 637 void kernel_init( boot_info_t * info ) 575 638 { 576 uint32_t core_lid; // running core local index 577 cxy_t core_cxy; // running core cluster identifier 578 gid_t core_gid; // running core hardware identifier 579 cluster_t * cluster; // pointer on local cluster manager 580 core_t * core; // pointer on running core descriptor 581 thread_t * thread_idle; // pointer on thread_idle 582 583 uint32_t i; 584 bool_t found; 639 uint32_t core_lid = -1; // running core local index 640 cxy_t core_cxy = -1; // running core cluster identifier 641 gid_t core_gid; // running core hardware identifier 642 cluster_t * cluster; // pointer on local cluster manager 643 core_t * core; // pointer on running core descriptor 644 thread_t * thread; // pointer on idle thread descriptor 585 645 error_t error; 586 646 587 // initialise global cluster identifier 588 local_cxy = info->cxy; 589 590 // each core get its global index from hardware register 591 core_gid = hal_get_gid(); 592 593 // Each core makes an associative search in boot_info 594 // to get its (cxy,lid) composite index from its gid 595 found = false; 596 core_cxy = 0; 597 core_lid = 0; 598 for( i = 0 ; i < info->cores_nr ; i++ ) 599 { 600 if( core_gid == info->core[i].gid ) 601 { 602 core_lid = info->core[i].lid; 603 core_cxy = info->core[i].cxy; 604 found = true; 605 break; 606 } 607 } 608 609 // suicide if not found 610 if( (found == false) || (core_cxy != local_cxy) ) hal_core_sleep(); 611 612 ////////////////////////////////////////////////////////////// 613 // In first step, only CP0 initialises local resources 614 ////////////////////////////////////////////////////////////// 615 616 if( core_lid == 0 ) 617 { 618 // initialize local cluster manager (cores and memory allocators) 647 // all cores get core identifiers 648 error = core_get_identifiers( info, 649 &core_lid, 650 &core_cxy, 651 &core_gid ); 652 653 // CP0 initialise cluster identifier 654 if( core_lid == 0 ) local_cxy = info->cxy; 655 656 // CP0 in 
I/O cluster initialises TXT0 chdev descriptor 657 if( (core_lid == 0) && (core_cxy == info->io_cxy) ) txt0_device_init( info ); 658 659 ///////////////////////////////////////////////////////////////////////////////// 660 // global & local synchro to protect access to TXT0 terminal 661 if( core_lid == 0 ) remote_barrier( XPTR( info->io_cxy , &global_barrier ), 662 (info->x_size * info->y_size) ); 663 barrier_wait( &local_barrier , info->cores_nr ); 664 ///////////////////////////////////////////////////////////////////////////////// 665 666 kinit_dmsg("\n[INFO] %s : core[%x][%d] exit barrier 0\n", 667 __FUNCTION__ , core_cxy , core_lid ); 668 669 // all cores check core identifiers 670 if( error ) 671 { 672 printk("\n[PANIC] in %s : illegal core identifiers" 673 " gid = %x / cxy = %x / lid = %d\n", 674 __FUNCTION__ , core_lid , core_cxy , core_lid ); 675 hal_core_sleep(); 676 } 677 else 678 { 679 kinit_dmsg("\n[INFO] %s : core[%x][%d] enters at cycle %d / sp = %x\n", 680 __FUNCTION__ , core_cxy , core_lid , hal_time_stamp() , hal_get_stack() ); 681 } 682 683 // CP0 initialize local cluster manager (cores and memory allocators) 684 if( core_lid == 0 ) 685 { 619 686 error = cluster_init( info ); 620 621 // suicide if failure 622 if( error ) hal_core_sleep(); 623 624 // get pointer on local cluster manager and on core descriptor 625 cluster = LOCAL_CLUSTER; 626 core = &cluster->core_tbl[core_lid]; 627 628 // initialize process_zero descriptor 629 process_zero_init( info ); 630 631 // CP0 initialize its private thread_zero descriptor 632 memset( &thread_zero , 0 , sizeof(thread_t) ); 633 thread_zero.type = THREAD_KERNEL; 634 thread_zero.process = &process_zero; 635 hal_set_current_thread( &thread_zero ); 636 637 // CP0 in I/O cluster initialize the kernel TXT0 chdev descriptor. 
638 // this TXTO device is shared by the all kernel instances for debug messages: 639 // the printk() function call the dev_txt_sync_write() function that call 640 // directly the relevant TXT driver, without desheduling. 641 if( core_cxy == info->io_cxy ) txt0_device_init( info ); 642 643 // synchronise all CP0s before using TXT0 644 remote_barrier( XPTR( info->io_cxy , &cluster->barrier ) , 645 (cluster->x_size * cluster->y_size) ); 646 647 // All CP0 initialise internal peripheral chdev descriptors. 648 // Each CP0[cxy] scan the set of its internal (private) peripherals, 649 // and allocate memory for the corresponding chdev descriptors. 650 internal_devices_init( info ); 687 688 if( error ) 689 { 690 printk("\n[PANIC] in %s : cannot initialise cluster manager in cluster %x", 691 __FUNCTION__ , local_cxy ); 692 hal_core_sleep(); 693 } 694 else 695 { 696 kinit_dmsg("\n[INFO] %s : core[%x][%d] initialised cluster at cycle %d\n", 697 __FUNCTION__ , core_cxy , core_lid , hal_time_stamp()); 698 } 699 } 700 701 ///////////////////////////////////////////////////////////////////////////////// 702 // global & local synchro, to protect access to cluster manager 703 if( core_lid == 0 ) remote_barrier( XPTR( info->io_cxy , &global_barrier ), 704 (info->x_size * info->y_size) ); 705 barrier_wait( &local_barrier , info->cores_nr ); 706 ///////////////////////////////////////////////////////////////////////////////// 707 708 kinit_dmsg("\n[INFO] %s : core[%x][%d] exit barrier 1\n", 709 __FUNCTION__ , core_cxy , core_lid ); 710 711 // all cores get pointer on local cluster manager and on core descriptor 712 cluster = &cluster_manager; 713 core = &cluster->core_tbl[core_lid]; 714 715 // CP0 initialize process_zero descriptor 716 if( core_lid == 0 ) process_zero_init( info ); 717 718 // CP0 allocate and initialise internal peripheral chdev descriptors. 
719 // Each CP0[cxy] scan the set of its internal (private) peripherals, 720 // and allocate memory for the corresponding chdev descriptors. 721 if( core_lid == 0 ) internal_devices_init( info ); 651 722 652 // All CP0 contribute to initialise external peripheral chdev descriptors. 653 // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0), 654 // and allocates memory for the chdev descriptors that must be placed 655 // on the (cxy) cluster according to its global index. 656 external_devices_init( info ); 657 658 // TODO initialize devFS and sysFS 659 // devfs_root_init(); 660 // sysfs_root_init(); 661 662 // TODO ??? [AG] 663 // clusters_sysfs_register(); 664 665 // TODO initialize virtual file system 723 // CP0 allocates one WTI mailbbox per core for Inter Processor Interrupt 724 // this must be done after ICU chdev initialisation, by CP0 only, and before 725 // external devices initialisation to enforce the rule (wti_id == lid) 726 if( core_lid == 0 ) 727 { 728 uint32_t wti_id; 729 uint32_t lid; 730 for( lid = 0 ; lid < LOCAL_CLUSTER->cores_nr ; lid++ ) 731 { 732 wti_id = dev_icu_wti_alloc(); 733 734 if( wti_id != lid ) 735 { 736 printk("\n[PANIC] in %s : WTI index for IPI = %d / core_lid = %d", 737 __FUNCTION__ , wti_id , lid ); 738 hal_core_sleep(); 739 } 740 741 dev_icu_enable_irq( lid , WTI_TYPE , wti_id , NULL ); 742 } 743 } 744 745 // CP0 contribute to initialise external peripheral chdev descriptors. 746 // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0), 747 // and allocates memory for the chdev descriptors that must be placed 748 // on the (cxy) cluster according to the global index value. 
749 if( core_lid == 0 ) external_devices_init( info ); 750 751 ///////////////////////////////////////////////////////////////////////////////// 752 // global &local synchro to protect access to peripherals 753 if( core_lid == 0 ) remote_barrier( XPTR( info->io_cxy , &global_barrier ), 754 (info->x_size * info->y_size) ); 755 barrier_wait( &local_barrier , info->cores_nr ); 756 ///////////////////////////////////////////////////////////////////////////////// 757 758 kinit_dmsg("\n[INFO] %s : core[%x][%d] exit barrier 2\n", 759 __FUNCTION__ , core_cxy , core_lid ); 760 761 // all cores initialize the private idle thread descriptor 762 thread = (thread_t *)( idle_threads + (core_lid * CONFIG_THREAD_DESC_SIZE) ); 763 764 error = thread_kernel_init( thread, 765 THREAD_IDLE, 766 &thread_idle_func, 767 NULL, 768 core_lid ); 769 770 if( error ) 771 { 772 printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n", 773 __FUNCTION__ , local_cxy , core_lid ); 774 hal_core_sleep(); 775 } 776 else 777 { 778 // register idle thread in scheduler 779 core->scheduler.idle = thread; 780 781 // register idle thread pointer in core register 782 hal_set_current_thread( thread ); 783 784 // activate the idle thread 785 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL ); 786 787 kinit_dmsg("\n[INFO] %s : core[%x][%d] created idle thread %x at cycle %d\n", 788 __FUNCTION__ , core_cxy , core_lid , thread , hal_time_stamp()); 789 } 790 791 // TODO CP0 in IO cluster initialize VFS, devFS and sysFS 792 { 793 // devfs_root_init(); 794 // sysfs_root_init(); 795 // clusters_sysfs_register(); 666 796 // vfs_init(); 667 668 // TODO ??? 
[AG] 669 // sysconf_init(); 670 671 // activate other cores in same cluster 672 local_sync_init = KERNEL_INIT_SYNCHRO; 673 hal_wbflush(); 674 } 675 else // other cores 676 { 677 // other cores wait synchro from core[0] 678 while( local_sync_init != KERNEL_INIT_SYNCHRO ) 679 { 680 uint32_t retval = hal_time_stamp() + 1000; 681 while( hal_time_stamp() < retval ) asm volatile ("nop"); 682 } 683 684 // get pointer on local cluster manager and on core descriptor 685 cluster = LOCAL_CLUSTER; 686 core = &cluster->core_tbl[core_lid]; 687 688 // core initialise its private thread_zero descriptor 689 memset( &thread_zero , 0 , sizeof(thread_t) ); 690 thread_zero.type = THREAD_KERNEL; 691 thread_zero.process = &process_zero; 692 hal_set_current_thread( &thread_zero ); 693 } 694 695 // each core creates its private idle thread descriptor 696 error = thread_kernel_create( &thread_idle, 697 THREAD_IDLE, 698 &thread_idle_func, 699 NULL, 700 core_lid ); 701 702 assert( (error == 0) , __FUNCTION__ , "cannot create idle thread" ); 703 704 // each core register thread_idle in scheduler 705 core->scheduler.idle = thread_idle; 706 707 // each core register thread pointer in core hardware register 708 hal_set_current_thread( thread_idle ); 709 710 kinit_dmsg("\n[INFO] %s : thread idle created for core[%x][%d] at cycle %d\n", 711 __FUNCTION__ , core_cxy , core_lid , hal_time_stamp()); 712 713 // global syncho for all core[0] in all clusters 714 if( core_lid == 0 ) 715 { 716 remote_barrier( XPTR( info->io_cxy , &cluster->barrier ) , 717 (cluster->x_size * cluster->y_size) ); 718 } 719 720 // local synchro for all cores in local cluster 721 remote_barrier( XPTR( local_cxy , &cluster->barrier ) , 722 cluster->cores_nr ); 723 797 // sysconf_init(); 798 } 799 800 // CP0 in I/O cluster print banner 724 801 if( (core_lid == 0) && (local_cxy == info->io_cxy) ) 725 802 { … … 727 804 } 728 805 729 // load idle thread context on calling core 730 hal_cpu_context_load( thread_idle ); 806 
///////////////////////////////////////////////////////////////////////////////// 807 // global syncho to protect access to File System 808 if( core_lid == 0 ) remote_barrier( XPTR( info->io_cxy , &global_barrier ), 809 (info->x_size * info->y_size) ); 810 barrier_wait( &local_barrier , info->cores_nr ); 811 ///////////////////////////////////////////////////////////////////////////////// 812 813 kinit_dmsg("\n[INFO] %s : core[%x][%d] exit barrier 3\n", 814 __FUNCTION__ , core_cxy , core_lid ); 815 816 // each core activates its private PTI IRQ 817 dev_icu_set_period( core_lid , CONFIG_SCHED_TICK_PERIOD ); 818 dev_icu_enable_irq( core_lid , PTI_TYPE , core_lid , NULL ); 819 820 // each core get its private IRQ masks values and 821 uint32_t hwi_mask; 822 uint32_t wti_mask; 823 uint32_t pti_mask; 824 dev_icu_get_masks( core_lid , &hwi_mask , &wti_mask , &pti_mask ); 825 826 thread_dmsg("\n[INFO] %s : core[%x][%d] activates scheduler at cycle %d\n" 827 " hwi_mask = %x / wti_mask = %x / pti_mask = %x\n", 828 __FUNCTION__ , local_cxy , core_lid , hal_time_stamp() , 829 hwi_mask , wti_mask , pti_mask ); 830 831 // each core jump to idle thread 832 asm volatile( "j thread_idle_func\n" ); 731 833 732 834 } // end kernel_init() -
trunk/kernel/kern/metafs.h
r1 r14 26 26 #define _METAFS_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <list.h> -
trunk/kernel/kern/printk.c
r5 r14 28 28 #include <remote_spinlock.h> 29 29 #include <cluster.h> 30 #include <chdev.h> 30 31 #include <printk.h> 32 33 /////////////////////////////////////////////////////////////////////////////////// 34 // Extern variables 35 /////////////////////////////////////////////////////////////////////////////////// 36 37 extern chdev_t txt0_chdev; // allocated in kernel_init.c 31 38 32 39 /////////////////////////////////////////////////////////////////////////////////// … … 213 220 } // end kernel_printf() 214 221 215 //////////////////////////////// 216 void printk( char * format , ...)222 ///////////////////////////////// 223 void printk( char * format , ...) 217 224 { 218 225 va_list args; 219 220 // call kernel_printf 226 uint32_t save_sr; 227 228 // get extended pointer on remote TXT0 chdev lock 229 xptr_t txt0_lock_xp = XPTR( LOCAL_CLUSTER->io_cxy , &txt0_chdev.wait_lock ); 230 231 // get TXT0 lock in busy waiting mode 232 remote_spinlock_lock_busy( txt0_lock_xp , &save_sr ); 233 234 // call kernel_printf in busy waiting mode 221 235 va_start( args , format ); 222 236 kernel_printf( 0 , 1 , format , &args ); 223 237 va_end( args ); 238 239 // release lock 240 remote_spinlock_unlock_busy( txt0_lock_xp , save_sr ); 224 241 } 225 242 226 ///////////////////////////////////// 227 void user_printk( char * format, ...)243 ////////////////////////////////////// 244 void user_printk( char * format , ...) 228 245 { 229 246 va_list args; … … 232 249 uint32_t channel = 0; 233 250 234 // call kernel_printf251 // call kernel_printf in descheduling mode 235 252 va_start( args , format ); 236 253 kernel_printf( channel, 0 , format , &args ); -
trunk/kernel/kern/process.c
r5 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_remote.h> … … 546 546 bool_t found; 547 547 548 if( process == NULL ) 549 { 550 printk("\n[PANIC] in %s : process argument is NULL\n", __FUNCTION__ ); 551 hal_core_sleep(); 552 } 553 if( thread == NULL ) 554 { 555 printk("\n[PANIC] in %s : thread argument is NULL\n", __FUNCTION__ ); 556 hal_core_sleep(); 557 } 548 assert( (process != NULL) , __FUNCTION__ , "process argument is NULL" ); 549 550 assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" ); 558 551 559 552 // search a free slot in th_tbl[] -
trunk/kernel/kern/process.h
r1 r14 27 27 #define _PROCESS_H_ 28 28 29 #include < almos_config.h>29 #include <kernel_config.h> 30 30 #include <errno.h> 31 31 #include <hal_types.h> -
trunk/kernel/kern/rpc.c
r5 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_atomic.h> -
trunk/kernel/kern/rpc.h
r5 r14 26 26 #define _RPC_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <hal_atomic.h> -
trunk/kernel/kern/scheduler.c
r1 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_irqmask.h> … … 112 112 if( thread_can_yield() == false ) 113 113 { 114 printk("\n[ ERROR] in %s : thread %x in process %x on core %d in cluster %x"114 printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]" 115 115 " did not released all locks\n", 116 116 __FUNCTION__ , thread->trdid , thread->process->pid, 117 thread->core->lid , local_cxy);117 local_cxy , thread->core->lid ); 118 118 hal_core_sleep(); 119 119 } … … 237 237 if( thread_can_yield() == false ) 238 238 { 239 printk("\n[PANIC] in %s : thread %x for process %x on core %d in cluster %x" 240 " has not released all locks\n", 241 __FUNCTION__, current->trdid, current->process->pid, core->lid, local_cxy ); 239 printk("\n[PANIC] in %s : thread %x for process %x on core_gid %x" 240 " has not released all locks at cycle %d\n", 241 __FUNCTION__, current->trdid, current->process->pid, 242 local_cxy , core->lid , hal_time_stamp() ); 242 243 hal_core_sleep(); 243 244 } … … 256 257 { 257 258 printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x" 258 " on core %d in cluster %x\n",259 __FUNCTION__, next->trdid, next->process->pid, core->lid, local_cxy);259 " on core [%x][%d]\n", 260 __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid ); 260 261 hal_core_sleep(); 261 262 } 262 263 263 264 sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n", 264 __FUNCTION__, core->lid, cluster->cxy, current->trdid, next->trdid );265 __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid ); 265 266 266 267 // switch contexts if new thread … … 342 343 343 344 sched_dmsg("INFO : %s on core %d in cluster %x / old thread = %x / new thread = %x\n", 344 __FUNCTION__, core->lid, cluster->cxy, current->trdid, new->trdid );345 __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid ); 345 346 346 347 // switch 
contexts if new thread -
trunk/kernel/kern/scheduler.h
r1 r14 55 55 56 56 /*********************************************************************************************** 57 * This function initialises the scheduler for a gi nen core.57 * This function initialises the scheduler for a given core. 58 58 **********************************************************************************************/ 59 59 void sched_init( struct core_s * core ); -
trunk/kernel/kern/thread.c
r5 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_context.h> … … 76 76 77 77 ///////////////////////////////////////////////////////////////////////////////////// 78 // This static function makes the actual allocation and initialisation for a thread79 // descriptor. It iscalled by the three functions:78 // This static function allocates physical memory for a thread descriptor. 79 // It can be called by the three functions: 80 80 // - thread_user_create() 81 // - thread_user_ copy()81 // - thread_user_fork() 82 82 // - thread_kernel_create() 83 83 ///////////////////////////////////////////////////////////////////////////////////// 84 // @ new_thread : buffer for new thread pointer. 85 // @ process : local pointer on process descriptor. 86 // @ type : thread type. 87 // @ func : local pointer on thread entry function. 88 // @ args : local pointer on thread entry function arguments. 89 // @ core_lid : target core local index. 84 // @ return pointer on thread descriptor if success / return NULL if failure. 
90 85 ///////////////////////////////////////////////////////////////////////////////////// 91 static error_t thread_create( thread_t ** new_thread, 92 process_t * process, 93 thread_type_t type, 94 void * func, 95 void * args, 96 lid_t core_lid, 97 intptr_t u_stack_base, 98 uint32_t u_stack_size ) 99 { 100 error_t error; 101 thread_t * thread; // pointer on thread descriptor 86 static thread_t * thread_alloc() 87 { 102 88 page_t * page; // pointer on page descriptor containing thread descriptor 103 89 kmem_req_t req; // kmem request 104 trdid_t trdid; // allocated thread identifier105 106 cluster_t * local_cluster = LOCAL_CLUSTER;107 90 108 91 // allocates memory for thread descriptor + kernel stack 109 92 req.type = KMEM_PAGE; 110 req.size = CONFIG_THREAD_ PAGE_ORDER;93 req.size = CONFIG_THREAD_DESC_ORDER; 111 94 req.flags = AF_KERNEL | AF_ZERO; 112 95 page = kmem_alloc( &req ); 113 if( page == NULL ) return ENOMEM; 114 115 // get pointer on new thread descriptor 116 thread = (thread_t *)ppm_page2base( page ); 117 118 // register new thread in local process descriptor, and get a TRDID 96 97 // return pointer on new thread descriptor 98 if( page == NULL ) 99 { 100 printk("\n[ERROR] in %s : no memory for thread descriptor\n", __FUNCTION__ ); 101 return NULL; 102 } 103 else 104 { 105 return (thread_t *)ppm_page2base( page ); 106 } 107 } // end thread_alloc() 108 109 ///////////////////////////////////////////////////////////////////////////////////// 110 // This static function initializes a thread descriptor (kernel or user). 111 // It can be called by the four functions: 112 // - thread_user_create() 113 // - thread_user_fork() 114 // - thread_kernel_create() 115 // - thread_user_init() 116 ///////////////////////////////////////////////////////////////////////////////////// 117 // @ thread : pointer on thread descriptor 118 // @ process : pointer on process descriptor. 119 // @ type : thread type. 120 // @ func : pointer on thread entry function. 
121 // @ args : pointer on thread entry function arguments. 122 // @ core_lid : target core local index. 123 // @ u_stack_base : stack base (user thread only) 124 // @ u_stack_size : stack base (user thread only) 125 ///////////////////////////////////////////////////////////////////////////////////// 126 static error_t thread_init( thread_t * thread, 127 process_t * process, 128 thread_type_t type, 129 void * func, 130 void * args, 131 lid_t core_lid, 132 intptr_t u_stack_base, 133 uint32_t u_stack_size ) 134 { 135 error_t error; 136 trdid_t trdid; // allocated thread identifier 137 138 cluster_t * local_cluster = LOCAL_CLUSTER; 139 140 // register new thread in process descriptor, and get a TRDID 119 141 spinlock_lock( &process->th_lock ); 120 142 error = process_register_thread( process, thread , &trdid ); … … 123 145 if( error ) 124 146 { 125 // release allocated memory for thread descriptor 126 req.type = KMEM_PAGE; 127 req.ptr = page; 128 kmem_free( &req ); 129 return EAGAIN; 130 } 131 147 printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ ); 148 return EINVAL; 149 } 150 132 151 // Initialize new thread descriptor 133 152 thread->trdid = trdid; … … 138 157 thread->core = &local_cluster->core_tbl[core_lid]; 139 158 thread->process = process; 140 thread->page = page;141 159 142 160 thread->local_locks = 0; … … 149 167 thread->u_stack_size = u_stack_size; 150 168 thread->k_stack_base = (intptr_t)thread; 151 thread->k_stack_size = CONFIG_ PPM_PAGE_SIZE << CONFIG_THREAD_PAGE_ORDER;169 thread->k_stack_size = CONFIG_THREAD_DESC_SIZE; 152 170 153 171 thread->entry_func = func; // thread entry point … … 182 200 sched_register_thread( thread->core , thread ); 183 201 184 *new_thread = thread;185 202 return 0; 186 } // end thread_create() 203 204 } // end thread_init() 187 205 188 206 … … 197 215 process_t * process; // pointer to local process descriptor 198 216 lid_t core_lid; // selected core local index 199 200 thread_dmsg("\n[INFO] %s : enters\n", 201 217 
kmem_req_t req; // kmem request (for release) 218 219 thread_dmsg("\n[INFO] %s : enters\n", __FUNCTION__ ); 202 220 203 221 cluster_t * local_cluster = LOCAL_CLUSTER; … … 214 232 if( process == NULL ) return ENOMEM; 215 233 216 // make allocation / initialisation 217 error = thread_create( &thread, 218 process, 219 THREAD_USER, 220 attr->entry_func, 221 attr->entry_args, 222 core_lid, 223 u_stack_base, 224 u_stack_size ); 225 if( error ) return ENOMEM; 226 227 // set LOADABLE flag / set ATTACHED flag if required 234 // allocates memory tor thread descriptor 235 thread = thread_alloc(); 236 237 if( thread == NULL ) return ENOMEM; 238 239 // initializes thread descriptor 240 error = thread_init( thread, 241 process, 242 THREAD_USER, 243 attr->entry_func, 244 attr->entry_args, 245 core_lid, 246 u_stack_base, 247 u_stack_size ); 248 249 if( error ) // release allocated memory for thread descriptor 250 { 251 req.type = KMEM_PAGE; 252 req.ptr = ppm_base2page( thread ); 253 kmem_free( &req ); 254 return EINVAL; 255 } 256 257 // set LOADABLE flag 228 258 thread->flags = THREAD_FLAG_LOADABLE; 259 260 // set DETACHED flag if required 229 261 if( attr->flags & PT_FLAG_DETACH ) thread->flags |= THREAD_FLAG_DETACHED; 230 262 … … 242 274 *new_thread = thread; 243 275 return 0; 276 244 277 } // end thread_user_create() 245 278 … … 252 285 { 253 286 error_t error; 254 thread_t * new; // pointer onthread descriptor287 thread_t * thread; // pointer on new thread descriptor 255 288 lid_t core_lid; // selected core local index 256 257 thread_dmsg("\n[INFO] %s : enters\n", 258 289 kmem_req_t req; // kmem request (for release) 290 291 thread_dmsg("\n[INFO] %s : enters\n", __FUNCTION__ ); 259 292 260 293 // select a target core in local cluster … … 264 297 thread_t * this = CURRENT_THREAD; 265 298 266 // make allocation / initialisation 267 error = thread_create( &new, 268 process, 269 THREAD_USER, 270 this->entry_func, 271 this->entry_args, 272 core_lid, 273 u_stack_base, 274 
u_stack_size ); 299 // allocated memory for new thread descriptor 300 thread = thread_alloc(); 301 302 if( thread == NULL ) return ENOMEM; 303 304 // initializes thread descriptor 305 error = thread_init( thread, 306 process, 307 THREAD_USER, 308 this->entry_func, 309 this->entry_args, 310 core_lid, 311 u_stack_base, 312 u_stack_size ); 313 314 if( error ) // release allocated memory for thread descriptor 315 { 316 req.type = KMEM_PAGE; 317 req.ptr = ppm_base2page( thread ); 318 kmem_free( &req ); 319 return EINVAL; 320 } 321 322 // set ATTACHED flag if set in this thread 323 if( this->flags & THREAD_FLAG_DETACHED ) thread->flags = THREAD_FLAG_DETACHED; 324 325 // allocate & initialise CPU context from calling thread 326 error = hal_cpu_context_copy( thread , this ); 275 327 if( error ) return ENOMEM; 276 328 277 // set ATTACHED flag if set in this thread 278 if( this->signals & THREAD_FLAG_DETACHED ) new->signals = THREAD_FLAG_DETACHED; 279 280 // allocate & initialise CPU context from calling thread 281 error = hal_cpu_context_copy( new , this ); 329 // allocate & initialise FPU context from calling thread 330 error = hal_fpu_context_copy( thread , this ); 282 331 if( error ) return ENOMEM; 283 332 284 // allocate & initialise FPU context from calling thread285 error = hal_fpu_context_copy( new , this );286 if( error ) return ENOMEM;287 288 333 thread_dmsg("INFO : %s thread %x for process %x on core %d in cluster %x\n", 289 __FUNCTION__, new->trdid, process->pid, core_lid, local_cxy );290 291 *new_thread = new;334 __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy ); 335 336 *new_thread = thread; 292 337 return 0; 293 338 … … 304 349 { 305 350 error_t error; 306 thread_t * new; // pointer on new thread descriptor 307 308 thread_dmsg("\n[INFO] %s : enters for %s in cluster %x\n", 351 thread_t * thread; // pointer on new thread descriptor 352 kmem_req_t req; // kmem request (for release) 353 354 thread_dmsg("\n[INFO] %s : enters for type %s in cluster 
%x\n", 309 355 __FUNCTION__ , thread_type_str( type ) , local_cxy ); 310 356 … … 316 362 __FUNCTION__ , "illegal core_lid" ); 317 363 318 // make allocation / initialisation 319 error = thread_create( &new, 320 &process_zero, 321 type, 322 func, 323 args, 324 core_lid, 325 0 , 0 ); // no user stack for a kernel thread 326 if( error ) 327 { 328 printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ ); 329 return ENOMEM; 330 } 364 // allocated memory for new thread descriptor 365 thread = thread_alloc(); 366 367 if( thread == NULL ) return ENOMEM; 368 369 // initializes thread descriptor 370 error = thread_init( thread, 371 &process_zero, 372 type, 373 func, 374 args, 375 core_lid, 376 0 , 0 ); // no user stack for a kernel thread 377 378 if( error ) // release allocated memory for thread descriptor 379 { 380 req.type = KMEM_PAGE; 381 req.ptr = ppm_base2page( thread ); 382 kmem_free( &req ); 383 return EINVAL; 384 } 385 331 386 332 387 // allocate & initialise CPU context 333 hal_cpu_context_create( new);334 335 thread_dmsg("\n[INFO] %s : sucessfully exit / trdid = %x / core= %d\n",336 __FUNCTION__ , new->trdid , core_lid );337 338 *new_thread = new;388 hal_cpu_context_create( thread ); 389 390 thread_dmsg("\n[INFO] %s : exit in cluster %x / trdid = %x / core_lid = %d\n", 391 __FUNCTION__ , local_cxy , thread->trdid , core_lid ); 392 393 *new_thread = thread; 339 394 return 0; 340 395 341 396 } // end thread_kernel_create() 342 397 398 /////////////////////////////////////////////////// 399 error_t thread_kernel_init( thread_t * thread, 400 thread_type_t type, 401 void * func, 402 void * args, 403 lid_t core_lid ) 404 { 405 assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) || 406 (type == THREAD_IDLE) || (type == THREAD_DEV) ) , 407 __FUNCTION__ , "illegal thread type" ); 408 409 if( core_lid >= LOCAL_CLUSTER->cores_nr ) 410 { 411 printk("\n[PANIC] in %s : illegal core_lid / cores = %d / lid = %d / cxy = %x\n", 412 __FUNCTION__ , 
LOCAL_CLUSTER->cores_nr , core_lid , local_cxy ); 413 hal_core_sleep(); 414 } 415 416 error_t error = thread_init( thread, 417 &process_zero, 418 type, 419 func, 420 args, 421 core_lid, 422 0 , 0 ); // no user stack for a kernel thread 423 424 // allocate & initialize CPU context if success 425 if( error == 0 ) hal_cpu_context_create( thread ); 426 427 return error; 428 429 } // end thread_kernel_init() 343 430 344 431 /////////////////////////////////////////////////////////////////////////////////////// … … 592 679 593 680 594 ///////////////////////// 595 void * thread_idle_func() 596 { 681 /////////////////////// 682 void thread_idle_func() 683 { 684 lid_t lid = CURRENT_CORE->lid; 685 597 686 while( 1 ) 598 687 { 599 thread_dmsg("\n[INFO] %s : core %d in cluster %x goes to sleeping state at cycle\n",600 __FUNCTION__ , core->lid , local_cxy, hal_time_stamp() );688 thread_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n", 689 __FUNCTION__ , local_cxy , lid , hal_time_stamp() ); 601 690 602 691 // force core to sleeping state 603 692 hal_core_sleep(); 604 693 605 thread_dmsg("\n[INFO] %s : core %d in cluster %x wake up at cycle %d\n", 606 __FUNCTION__ , core->lid , local_cxy , hal_time_stamp() ); 607 608 // force scheduling at wake-up 694 thread_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n", 695 __FUNCTION__ , local_cxy , lid , hal_time_stamp() ); 696 697 // acknowledge IRQ 698 dev_icu_irq_handler(); 699 700 // force scheduling 609 701 sched_yield(); 610 702 } -
trunk/kernel/kern/thread.h
r5 r14 158 158 core_t * core; /*! pointer to the owner core */ 159 159 process_t * process; /*! pointer on local process descriptor */ 160 page_t * page; /*! pointer on page desc. containing thread */161 160 162 161 uint32_t local_locks; /*! number of local locks owned by thread */ … … 266 265 /*************************************************************************************** 267 266 * This function allocates memory for a kernel thread descriptor in the local cluster, 268 * and initialise it from arguments values. 267 * and initialise it from arguments values, calling the thread_kernel_init() function, 268 * that also allocates and initializes the CPU context. 269 269 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 270 270 *************************************************************************************** … … 283 283 284 284 /*************************************************************************************** 285 * This function initialises an existing kernel thread descriptor from arguments values. 286 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 287 *************************************************************************************** 288 * @ thread : pointer on existing thread descriptor. 289 * @ type : kernel thread type. 290 * @ func : pointer on function. 291 * @ args : function arguments. 292 * @ core_lid : local core index. 293 * @ returns 0 if success / returns EINVAL if error 294 **************************************************************************************/ 295 error_t thread_kernel_init( thread_t * thread, 296 thread_type_t type, 297 void * func, 298 void * args, 299 lid_t core_lid ); 300 301 /*************************************************************************************** 285 302 * This function releases the physical memory allocated for a thread descriptor 286 303 * in the local cluster. It can be used for both an user and a kernel thread. 
… … 293 310 294 311 /*************************************************************************************** 295 * This function defines the code of the thread executed when no other thread is 296 * runnable for a given core. It try to force the core to the low-power state. 297 **************************************************************************************/ 298 void * thread_idle_func(); 312 * This function defines the code of the thread executed by all cores after kernel_init, 313 * or when no other thread is runnable for a given core. 314 * 315 * TODO: In the TSAR architecture, it enters an infinite loop, in wich it forces 316 * the core in sleep (low-power) mode. Any IRQ will force the core to exit this sleep 317 * mode, but no ISR is executed. 318 * TODO: We must analyse if we have the same behaviour for I86 architectures... 319 **************************************************************************************/ 320 void thread_idle_func(); 299 321 300 322 /*************************************************************************************** -
trunk/kernel/kernel.ld
r11 r14 26 26 { 27 27 *(.kinfo) 28 *(.kidle) 28 29 *(.kdata*) 29 30 *(.data*) -
trunk/kernel/libk/bits.h
r11 r14 26 26 #define _BITS_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 -
trunk/kernel/libk/elf.c
r1 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_uspace.h> -
trunk/kernel/libk/list.h
r1 r14 26 26 #define _ALMOS_LIST_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 -
trunk/kernel/libk/readlock.c
r1 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_atomic.h> -
trunk/kernel/libk/readlock.h
r1 r14 25 25 #define _READLOCK_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/libk/remote_barrier.c
r1 r14 42 42 else expected = 0; 43 43 44 // increment count44 // atomically increment current 45 45 uint32_t current = hal_remote_atomic_add( XPTR( cxy , &ptr->current ) , 1 ); 46 46 -
trunk/kernel/libk/remote_barrier.h
r1 r14 25 25 #define _REMOTE_BARRIER_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/libk/remote_fifo.h
r1 r14 25 25 #define _REMOTE_FIFO_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <errno.h> -
trunk/kernel/libk/remote_rwlock.h
r1 r14 25 25 #define _REMOTE_RWLOCK_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <xlist.h> -
trunk/kernel/libk/remote_spinlock.h
r11 r14 26 26 #define _REMOTE_SPINLOCK_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <xlist.h> -
trunk/kernel/libk/rwlock.c
r1 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_atomic.h> -
trunk/kernel/libk/rwlock.h
r1 r14 25 25 #define _RWLOCK_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 -
trunk/kernel/libk/spinlock.c
r11 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_atomic.h> -
trunk/kernel/libk/spinlock.h
r11 r14 26 26 #define _SPINLOCK_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <list.h> -
trunk/kernel/libk/xhtab.c
r1 r14 22 22 */ 23 23 24 #include < almos_config.h>24 #include <kernel_config.h> 25 25 #include <hal_types.h> 26 26 #include <hal_special.h> -
trunk/kernel/libk/xhtab.h
r1 r14 25 25 #define _XHTAB_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <remote_rwlock.h> -
trunk/kernel/libk/xlist.h
r1 r14 25 25 #define _ALMOS_XLIST_H_ 26 26 27 #include < almos_config.h>27 #include <kernel_config.h> 28 28 #include <hal_types.h> 29 29 #include <hal_remote.h> -
trunk/kernel/mm/kcm.c
r7 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_special.h> -
trunk/kernel/mm/khm.c
r7 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_special.h> … … 62 62 khm->size = heap_size; 63 63 khm->next = (intptr_t)heap_base; 64 65 kinit_dmsg("\n[INFO] %s done in cluster %x at cycle %d\n",66 __FUNCTION__ , local_cxy , hal_time_stamp() );67 64 } 68 65 -
trunk/kernel/mm/khm.h
r1 r14 27 27 #define _HEAP_MANAGER_H_ 28 28 29 #include < almos_config.h>29 #include <kernel_config.h> 30 30 #include <hal_types.h> 31 31 #include <spinlock.h> -
trunk/kernel/mm/kmem.c
r7 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_special.h> -
trunk/kernel/mm/mapper.c
r1 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_special.h> -
trunk/kernel/mm/page.h
r1 r14 26 26 #define _PAGE_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <spinlock.h> -
trunk/kernel/mm/ppm.c
r7 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <hal_special.h> … … 159 159 } 160 160 161 #if( CONFIG_PPM_DEBUG )162 ppm_print( ppm , "after reset" );163 #endif164 165 161 // initialize dirty_list as empty 166 162 list_root_init( &ppm->dirty_root ); … … 209 205 ppm_assert_order( ppm ); 210 206 211 kinit_dmsg("\n[INFO] %s : done in cluster %x at cycle %d\n",212 __FUNCTION__ , local_cxy , hal_time_stamp() );213 214 #if( CONFIG_PPM_DEBUG )215 ppm_print( ppm , "after init" );216 #endif217 218 207 } // end ppm_init() 219 208 -
trunk/kernel/mm/vmm.c
r1 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_special.h> -
trunk/kernel/syscalls/sys_exec.c
r1 r14 23 23 */ 24 24 25 #include < almos_config.h>25 #include <kernel_config.h> 26 26 #include <hal_types.h> 27 27 #include <errno.h> -
trunk/kernel/syscalls/sys_thread_create.c
r1 r14 24 24 */ 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <printk.h> -
trunk/kernel/vfs/vfs.c
r10 r14 24 24 25 25 26 #include < almos_config.h>26 #include <kernel_config.h> 27 27 #include <hal_types.h> 28 28 #include <hal_atomic.h> -
trunk/kernel/vfs/vfs.h
r10 r14 26 26 #define _VFS_H_ 27 27 28 #include < almos_config.h>28 #include <kernel_config.h> 29 29 #include <hal_types.h> 30 30 #include <hal_atomic.h>
Note: See TracChangeset
for help on using the changeset viewer.