Changeset 279 for trunk/kernel
- Timestamp: Jul 27, 2017, 12:23:29 AM
- Location: trunk/kernel
- Files: 31 edited
trunk/kernel/devices/dev_dma.c
r262 r279 99 99 100 100 // register command in calling thread descriptor 101 this-> command.dma.dev_xp = dev_xp;102 this-> command.dma.dst_xp = dst_xp;103 this-> command.dma.src_xp = src_xp;104 this-> command.dma.size = size;101 this->dma_cmd.dev_xp = dev_xp; 102 this->dma_cmd.dst_xp = dst_xp; 103 this->dma_cmd.src_xp = src_xp; 104 this->dma_cmd.size = size; 105 105 106 106 // register client thread in waiting queue, activate server thread … … 110 110 111 111 dma_dmsg("\n[INFO] %s : completes for thread %x / error = %d\n", 112 __FUNCTION__ , this->trdid , this-> command.dma.error );112 __FUNCTION__ , this->trdid , this->dma_cmd.error ); 113 113 114 114 // return I/O operation status from calling thread descriptor 115 return this-> command.dma.error;115 return this->dma_cmd.error; 116 116 117 117 } // dev_dma_remote_memcpy() -
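The dev_dma.c hunk above is a mechanical rename: the DMA command moves from a shared union field (this->command.dma) to a dedicated per-type field (this->dma_cmd) of the thread descriptor. A minimal sketch of the resulting client-side pattern follows; the dma_command_t layout is reconstructed from the field names visible in the diff, and chdev_register_command() stands in for the enqueue/block/deschedule sequence elided by the "…" markers, so both are assumptions rather than the exact kernel API.

// Sketch only: field names taken from the r279 diff, helper name assumed.
typedef struct dma_command_s
{
    xptr_t   dev_xp;    // extended pointer on the DMA chdev descriptor
    xptr_t   dst_xp;    // extended pointer on destination buffer
    xptr_t   src_xp;    // extended pointer on source buffer
    uint32_t size;      // number of bytes to transfer
    error_t  error;     // I/O status, written back by the server thread
}
dma_command_t;

error_t dev_dma_remote_memcpy( xptr_t dst_xp , xptr_t src_xp , uint32_t size )
{
    thread_t * this   = CURRENT_THREAD;
    xptr_t     dev_xp = chdev_dir.dma[0];    // assumption: DMA channel 0

    // register command in calling thread descriptor
    this->dma_cmd.dev_xp = dev_xp;
    this->dma_cmd.dst_xp = dst_xp;
    this->dma_cmd.src_xp = src_xp;
    this->dma_cmd.size   = size;

    // register client thread in waiting queue, activate server thread,
    // block and deschedule (this sequence is elided in the diff)
    chdev_register_command( dev_xp );

    // the server thread stored the I/O status before unblocking us
    return this->dma_cmd.error;
}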
trunk/kernel/devices/dev_fbf.c
r214 r279 120 120 // It builds and registers the command in the calling thread descriptor, after 121 121 // translation of buffer virtual address to physical address. 122 // Then, it registers the calling thead in chdev waiting queue.122 // Then, it registers the calling thread in the relevant DMA chdev waiting queue. 123 123 // Finally it blocks on the THREAD_BLOCKED_DEV condition and deschedules. 124 124 ////////////////////////////////////i///////////////////////////////////////////// -
trunk/kernel/devices/dev_iob.h
r14 r279 34 34 * The IOB device is used to access external peripherals. It implements an IO-MMU service 35 35 * for DMA transactions launched by DMA-capable external peripherals. 36 * 36 37 * This IOB peripheral acts as a dynamically configurable bridge, used for other 37 38 * I/O operations. Therefore, ALMOS-MKH does not use the IOB device waiting queue, -
trunk/kernel/devices/dev_ioc.c
r238 r279 116 116 117 117 // register command in calling thread descriptor 118 this-> command.ioc.dev_xp = dev_xp;119 this-> command.ioc.type = cmd_type;120 this-> command.ioc.buf_xp = XPTR( local_cxy , buffer );121 this-> command.ioc.lba = lba;122 this-> command.ioc.count = count;118 this->ioc_cmd.dev_xp = dev_xp; 119 this->ioc_cmd.type = cmd_type; 120 this->ioc_cmd.buf_xp = XPTR( local_cxy , buffer ); 121 this->ioc_cmd.lba = lba; 122 this->ioc_cmd.count = count; 123 123 124 124 // register client thread in IOC chdev waiting queue, activate server thread, … … 130 130 " completes / error = %d / at cycle %d\n", 131 131 __FUNCTION__ , this->trdid , this->process->pid , 132 this-> command.ioc.error , hal_get_cycles() );132 this->ioc_cmd.error , hal_get_cycles() ); 133 133 134 134 // return I/O operation status 135 return this-> command.ioc.error;135 return this->ioc_cmd.error; 136 136 137 137 } // end dev_ioc_access() … … 158 158 uint32_t count ) 159 159 { 160 ioc_dmsg("\n[INFO] %s : enter in cluster %x\n", 161 __FUNCTION__ , local_cxy ); 162 160 163 // get pointer on calling thread 161 164 thread_t * this = CURRENT_THREAD; … … 165 168 166 169 // get extended pointer on IOC[0] chdev 167 xptr_t dev_xp = chdev_dir.ioc[0];168 169 assert( ( dev_xp != XPTR_NULL) , __FUNCTION__ , "undefined IOC chdev descriptor" );170 xptr_t ioc_xp = chdev_dir.ioc[0]; 171 172 assert( (ioc_xp != XPTR_NULL) , __FUNCTION__ , "undefined IOC chdev descriptor" ); 170 173 171 174 // register command in calling thread descriptor 172 this-> command.ioc.dev_xp = dev_xp;173 this-> command.ioc.type = IOC_SYNC_READ;174 this-> command.ioc.buf_xp = XPTR( local_cxy , buffer );175 this-> command.ioc.lba = lba;176 this-> command.ioc.count = count;175 this->ioc_cmd.dev_xp = ioc_xp; 176 this->ioc_cmd.type = IOC_SYNC_READ; 177 this->ioc_cmd.buf_xp = XPTR( local_cxy , buffer ); 178 this->ioc_cmd.lba = lba; 179 this->ioc_cmd.count = count; 177 180 178 181 // get driver command function 179 cxy_t dev_cxy = GET_CXY( dev_xp ); 180 chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp ); 181 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) ); 182 cxy_t ioc_cxy = GET_CXY( ioc_xp ); 183 chdev_t * ioc_ptr = (chdev_t *)GET_PTR( ioc_xp ); 184 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) ); 185 186 // get core local index for the core handling the IOC IRQ 187 thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) ); 188 core_t * core = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) ); 189 lid_t lid = (lid_t)hal_remote_lw( XPTR( ioc_cxy , &core->lid ) ); 182 190 183 191 // mask the IRQ 184 thread_t * server = (thread_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->server ));185 core_t * core = (core_t *)hal_remote_lpt( XPTR( dev_cxy , &server->core ) ); 186 lid_t lid = (lid_t)hal_remote_lw( XPTR( dev_cxy , &core->lid ) );187 dev_pic_disable_irq( lid , dev_xp);188 189 // call d irectly driver command192 dev_pic_disable_irq( lid , ioc_xp ); 193 194 ioc_dmsg("\n[INFO] %s : coucou 3\n", 195 __FUNCTION__ ); 196 197 // call driver function 190 198 cmd( XPTR( local_cxy , this ) ); 191 199 192 200 // unmask the IRQ 193 dev_pic_enable_irq( lid , dev_xp ); 201 dev_pic_enable_irq( lid , ioc_xp ); 202 203 ioc_dmsg("\n[INFO] %s : exit in cluster %x\n", 204 __FUNCTION__ , local_cxy ); 194 205 195 206 // return I/O operation status from calling thread descriptor 196 return this->command.ioc.error; 197 } 198 207 return this->ioc_cmd.error; 208 209 } // end ioc_sync_read() 
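The second hunk in dev_ioc.c reworks ioc_sync_read(), the synchronous variant used when descheduling is not possible: instead of queueing the command and blocking, it masks the IOC IRQ on the core that runs the server thread, calls the driver command function directly in the caller's context, then unmasks the IRQ and returns this->ioc_cmd.error. A hypothetical usage sketch follows; the 512-byte block size, the buffer type, and the exact argument order (buffer, lba, count) are assumptions based on the visible fragments.

// Hypothetical early-boot usage: read the first disk block synchronously,
// before the scheduler and the IOC server thread are operational.
char    boot_record[512];                               // assumed block size
error_t error = ioc_sync_read( boot_record , 0 , 1 );   // lba 0 / 1 block
if( error ) printk("\n[PANIC] in %s : cannot read boot record\n", __FUNCTION__ );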
trunk/kernel/devices/dev_mmc.c
r257 r279 65 65 { 66 66 // get extended pointer on MMC device descriptor 67 xptr_t dev_xp = this-> command.mmc.dev_xp;67 xptr_t dev_xp = this->mmc_cmd.dev_xp; 68 68 69 69 assert( (dev_xp != XPTR_NULL) , __FUNCTION__ , "target MMC device undefined" ); … … 86 86 87 87 // return operation status 88 return this-> command.mmc.error;88 return this->mmc_cmd.error; 89 89 90 90 } // end dev_mmc_access() … … 116 116 117 117 // store command arguments in thread descriptor 118 this-> command.mmc.dev_xp = chdev_dir.mmc[buf_cxy];119 this-> command.mmc.type = MMC_CC_INVAL;120 this-> command.mmc.buf_paddr = buf_paddr;121 this-> command.mmc.buf_size = buf_size;118 this->mmc_cmd.dev_xp = chdev_dir.mmc[buf_cxy]; 119 this->mmc_cmd.type = MMC_CC_INVAL; 120 this->mmc_cmd.buf_paddr = buf_paddr; 121 this->mmc_cmd.buf_size = buf_size; 122 122 123 123 // call MMC driver … … 156 156 157 157 // store command arguments in thread descriptor 158 this-> command.mmc.dev_xp = chdev_dir.mmc[buf_cxy];159 this-> command.mmc.type = MMC_CC_SYNC;160 this-> command.mmc.buf_paddr = buf_paddr;161 this-> command.mmc.buf_size = buf_size;158 this->mmc_cmd.dev_xp = chdev_dir.mmc[buf_cxy]; 159 this->mmc_cmd.type = MMC_CC_SYNC; 160 this->mmc_cmd.buf_paddr = buf_paddr; 161 this->mmc_cmd.buf_size = buf_size; 162 162 163 163 // call MMC driver … … 179 179 180 180 // store command arguments in thread descriptor 181 this-> command.mmc.dev_xp = chdev_dir.mmc[cxy];182 this-> command.mmc.type = MMC_SET_ERROR;183 this-> command.mmc.reg_index = index;184 this-> command.mmc.reg_ptr = &wdata;181 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 182 this->mmc_cmd.type = MMC_SET_ERROR; 183 this->mmc_cmd.reg_index = index; 184 this->mmc_cmd.reg_ptr = &wdata; 185 185 186 186 // execute operation … … 197 197 198 198 // store command arguments in thread descriptor 199 this-> command.mmc.dev_xp = chdev_dir.mmc[cxy];200 this-> command.mmc.type = MMC_GET_ERROR;201 this-> command.mmc.reg_index = index;202 this-> command.mmc.reg_ptr = rdata;199 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 200 this->mmc_cmd.type = MMC_GET_ERROR; 201 this->mmc_cmd.reg_index = index; 202 this->mmc_cmd.reg_ptr = rdata; 203 203 204 204 // execute operation … … 215 215 216 216 // store command arguments in thread descriptor 217 this-> command.mmc.dev_xp = chdev_dir.mmc[cxy];218 this-> command.mmc.type = MMC_GET_INSTRU;219 this-> command.mmc.reg_index = index;220 this-> command.mmc.reg_ptr = rdata;217 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 218 this->mmc_cmd.type = MMC_GET_INSTRU; 219 this->mmc_cmd.reg_index = index; 220 this->mmc_cmd.reg_ptr = rdata; 221 221 222 222 // execute operation -
trunk/kernel/devices/dev_nic.c
r259 r279 110 110 111 111 // initialize command in thread descriptor 112 thread_ptr-> command.nic.dev_xp = dev_xp;112 thread_ptr->nic_cmd.dev_xp = dev_xp; 113 113 114 114 // call driver to test readable 115 thread_ptr-> command.nic.cmd = NIC_CMD_READABLE;116 dev_ptr->cmd( thread_xp ); 117 118 // check error 119 error = thread_ptr-> command.nic.error;115 thread_ptr->nic_cmd.cmd = NIC_CMD_READABLE; 116 dev_ptr->cmd( thread_xp ); 117 118 // check error 119 error = thread_ptr->nic_cmd.error; 120 120 if( error ) return error; 121 121 122 122 // block and deschedule if queue non readable 123 if( thread_ptr-> command.nic.status == false )123 if( thread_ptr->nic_cmd.status == false ) 124 124 { 125 125 // enable NIC-RX IRQ … … 135 135 136 136 // call driver for actual read 137 thread_ptr-> command.nic.cmd = NIC_CMD_READ;138 thread_ptr-> command.nic.buffer = pkd->buffer;139 dev_ptr->cmd( thread_xp ); 140 141 // check error 142 error = thread_ptr-> command.nic.error;137 thread_ptr->nic_cmd.cmd = NIC_CMD_READ; 138 thread_ptr->nic_cmd.buffer = pkd->buffer; 139 dev_ptr->cmd( thread_xp ); 140 141 // check error 142 error = thread_ptr->nic_cmd.error; 143 143 if( error ) return error; 144 144 145 145 // returns packet length 146 pkd->length = thread_ptr-> command.nic.length;146 pkd->length = thread_ptr->nic_cmd.length; 147 147 148 148 nic_dmsg("\n[INFO] %s exit for NIC-RX thread on core %d in cluster %x\n", … … 180 180 181 181 // initialize command in thread descriptor 182 thread_ptr-> command.nic.dev_xp = dev_xp;182 thread_ptr->nic_cmd.dev_xp = dev_xp; 183 183 184 184 // call driver to test writable 185 thread_ptr-> command.nic.cmd = NIC_CMD_WRITABLE;186 dev_ptr->cmd( thread_xp ); 187 188 // check error 189 error = thread_ptr-> command.nic.error;185 thread_ptr->nic_cmd.cmd = NIC_CMD_WRITABLE; 186 dev_ptr->cmd( thread_xp ); 187 188 // check error 189 error = thread_ptr->nic_cmd.error; 190 190 if( error ) return error; 191 191 192 192 // block and deschedule if queue non writable 193 if( thread_ptr-> command.nic.status == false )193 if( thread_ptr->nic_cmd.status == false ) 194 194 { 195 195 // enable NIC-TX IRQ … … 205 205 206 206 // call driver for actual write 207 thread_ptr-> command.nic.cmd = NIC_CMD_WRITE;208 thread_ptr-> command.nic.buffer = pkd->buffer;209 thread_ptr-> command.nic.length = pkd->length;210 dev_ptr->cmd( thread_xp ); 211 212 // check error 213 error = thread_ptr-> command.nic.error;207 thread_ptr->nic_cmd.cmd = NIC_CMD_WRITE; 208 thread_ptr->nic_cmd.buffer = pkd->buffer; 209 thread_ptr->nic_cmd.length = pkd->length; 210 dev_ptr->cmd( thread_xp ); 211 212 // check error 213 error = thread_ptr->nic_cmd.error; 214 214 if( error ) return error; 215 215 -
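Beyond the command.nic → nic_cmd renames, this hunk exposes the two-phase RX protocol of dev_nic_read(): the driver is first queried with NIC_CMD_READABLE; if the queue is empty the thread enables the NIC-RX IRQ and blocks until the ISR reports an available packet; NIC_CMD_READ then performs the actual transfer. The condensed sketch below stitches the diff fragments together; the blocking cause THREAD_BLOCKED_IO and the lid argument are assumptions, the rest appears in the diff.

// Condensed RX sketch (names not visible in the diff are assumed).
thread_ptr->nic_cmd.dev_xp = dev_xp;
thread_ptr->nic_cmd.cmd    = NIC_CMD_READABLE;       // phase 1: test status
dev_ptr->cmd( thread_xp );
if( thread_ptr->nic_cmd.error ) return thread_ptr->nic_cmd.error;

if( thread_ptr->nic_cmd.status == false )            // RX queue not readable
{
    dev_pic_enable_irq( lid , dev_xp );              // enable NIC-RX IRQ
    thread_block( thread_ptr , THREAD_BLOCKED_IO );  // assumed blocking cause
    sched_yield();                                   // ISR will unblock us
}

thread_ptr->nic_cmd.cmd    = NIC_CMD_READ;           // phase 2: actual read
thread_ptr->nic_cmd.buffer = pkd->buffer;
dev_ptr->cmd( thread_xp );
pkd->length = thread_ptr->nic_cmd.length;            // return packet length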
trunk/kernel/devices/dev_pic.c
r252 r279 26 26 #include <chdev.h> 27 27 #include <printk.h> 28 #include <thread.h> 28 29 #include <hal_drivers.h> 29 30 #include <dev_pic.h> … … 82 83 xptr_t src_chdev_xp ) 83 84 { 85 irq_dmsg("\n[INFO] %s : core = [%x,%d] / source_chdev_xp = %l\n", 86 __FUNCTION__ , local_cxy , lid , src_chdev_xp ); 87 84 88 // get pointer on PIC chdev 85 89 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); … … 97 101 xptr_t src_chdev_xp ) 98 102 { 103 irq_dmsg("\n[INFO] %s : core = [%x,%d] / source_chdev_xp = %l\n", 104 __FUNCTION__ , local_cxy , lid , src_chdev_xp ); 105 99 106 // get pointer on PIC chdev 100 107 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); … … 111 118 void dev_pic_enable_timer( uint32_t period ) 112 119 { 120 irq_dmsg("\n[INFO] %s : core = [%x,%d] / period = %d\n", 121 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , period ); 122 113 123 // get pointer on PIC chdev 114 124 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); … … 122 132 } 123 133 134 ///////////////////////// 135 void dev_pic_enable_ipi() 136 { 137 irq_dmsg("\n[INFO] %s : core = [%x,%d]\n", 138 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ); 139 140 // get pointer on PIC chdev 141 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); 142 cxy_t pic_cxy = GET_CXY( chdev_dir.pic ); 143 144 // get pointer on enable_timer function 145 enable_ipi_t * f = hal_remote_lpt( XPTR( pic_cxy , &pic_ptr->ext.pic.enable_ipi ) ); 146 147 // call relevant driver function 148 f(); 149 } 150 124 151 ////////////////////////////////// 125 152 void dev_pic_send_ipi( cxy_t cxy, 126 153 lid_t lid ) 127 154 { 155 irq_dmsg("\n[INFO] %s : enter / src_core = [%x,%d] / dst_core = [%x,%d] / cycle = %d\n", 156 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, hal_time_stamp() ); 157 128 158 // get pointer on PIC chdev 129 159 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); … … 135 165 // call relevant driver function 136 166 f( cxy , lid ); 167 168 irq_dmsg("\n[INFO] %s : exit / src_core = [%x,%d] / dst_core = [%x,%d] / cycle = %d\n", 169 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, hal_time_stamp() ); 137 170 } 138 171 -
trunk/kernel/devices/dev_pic.h
r205 r279 34 34 * to route a given IRQ to a given core, in a given cluster, and to help the interrupt 35 35 * handler to select and execute the relevant ISR (Interrupt Service Routine). 36 * It handles the following type of interrupts: 37 * - External IRQs generated by the external (shared) peripherals. 38 * - Internal IRQs generated by the internal (replicated) peripherals. 39 * - Timer IRQs generated by the timers (one timer per core). 40 * - Inter Processor IRQs (IPI) generated by software. 41 * 42 * In most supported manycores architectures, the PIC device contains two types 36 * It handles the four following types of interrupts: 37 * 38 * 1) EXT_IRQ (External IRQ) generated by the external (shared) peripherals. 39 * 2) INT_IRQ (Internal IRQ) generated by the internal (replicated) peripherals. 40 * 3) TIM_IRQ (Timer IRQ) generated by the timers (one timer per core). 41 * 4) IPI_IRQ (Inter Processor IRQ) generated by software (one IPI per core). 42 * 43 * In supported manycores architectures, the PIC device contains two types 43 44 * of hardware components: 44 45 * - the IOPIC is an external component, handling all external peripherals IRQs. … … 54 55 * at kernel initialization. 55 56 * 56 * The PIC device defines 4generic commands that can be used by each kernel instance,57 * The PIC device defines generic commands that can be used by each kernel instance, 57 58 * - to create in local cluster the PIC implementation specific interupt vector(s), 58 59 * - to bind a given IRQ (internal or external IRQ to a given core in the local cluster, … … 64 65 * cluster manager or to the core descriptors to register the interrupt vectors 65 66 * used by the kernel to select the relevant ISR when an interrupt is received 66 * by a given core in a given cluster.67 * by a given core in a given cluster. 67 68 68 69 * This PIC device does not execute itself I/O operations. It is just acting as a … … 87 88 typedef void (disable_irq_t) ( lid_t lid , xptr_t src_chdev_xp ); 88 89 typedef void (enable_timer_t) ( uint32_t period ); 90 typedef void (enable_ipi_t) ( ); 89 91 typedef void (send_ipi_t) ( cxy_t cxy , lid_t lid ); 90 92 typedef void (extend_init_t) ( uint32_t * lapic_base ); … … 96 98 disable_irq_t * disable_irq; /*! pointer on the driver "disable_irq" function */ 97 99 enable_timer_t * enable_timer; /*! pointer on the driver "enable_timer" function */ 100 enable_timer_t * enable_ipi; /*! pointer on the driver "enable_ipi" function */ 98 101 send_ipi_t * send_ipi; /*! pointer on the driver "send_ipi" function */ 99 102 extend_init_t * extend_init; /*! pointer on the driver "init_extend" function */ … … 186 189 187 190 /***************************************************************************************** 188 * This function enables remote IRQ generated by a remote chdev, defined by the191 * This function enables the IRQ generated by a remote chdev, defined by the 189 192 * <src_chdev_xp> argument. It can be called by any thread running in any cluster, 190 193 * and can be used for both internal & external IRQs. … … 199 202 * This function disables remote IRQ generated by a remote chdev, defined by the 200 203 * <src_chdev_xp> argument. It can be called by any thread running in any cluster, 201 * and can be used for both internal & external IRQs.204 * and can be used for both INT_IRq & EXT_IRQ. 202 205 ***************************************************************************************** 203 206 * @ lid : target core local index (in cluster containing the source chdev). 
… … 208 211 209 212 /***************************************************************************************** 210 * This function activates the TI CK timerfor the calling core.211 * The <period> argument define the number of cycles between IRQs.213 * This function activates the TIM_IRQ for the calling core. 214 * The <period> argument define the number of cycles between twoo successive IRQs. 212 215 ***************************************************************************************** 213 216 * @ period : number of cycles between IRQs. 214 217 ****************************************************************************************/ 215 218 void dev_pic_enable_timer( uint32_t period ); 219 220 /***************************************************************************************** 221 * This function activates the IPI_IRQ for the calling core. 222 ****************************************************************************************/ 223 void dev_pic_enable_ipi(); 216 224 217 225 /***************************************************************************************** -
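The dev_pic.h hunks add a fourth per-core activation, dev_pic_enable_ipi(), alongside the timer one, plus the matching enable_ipi entry in pic_ext_t. The sketch below shows how a driver is expected to populate that entry and how kernel code then goes through the generic API; the soclib_pic_* driver names are hypothetical, while the pic_ext_t field names and the two dev_pic_* calls come from the diff.

// Driver side (hypothetical SoCLib driver): fill the PIC extension at init.
pic_chdev->ext.pic.enable_irq   = &soclib_pic_enable_irq;
pic_chdev->ext.pic.disable_irq  = &soclib_pic_disable_irq;
pic_chdev->ext.pic.enable_timer = &soclib_pic_enable_timer;
pic_chdev->ext.pic.enable_ipi   = &soclib_pic_enable_ipi;    // new in r279
pic_chdev->ext.pic.send_ipi     = &soclib_pic_send_ipi;

// Kernel side: during kernel_init(), each core activates its private
// IPI_IRQ and TIM_IRQ through the generic functions.
dev_pic_enable_ipi();
dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );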
trunk/kernel/devices/dev_txt.c
r255 r279 112 112 113 113 // register command in calling thread descriptor 114 this-> command.txt.dev_xp = dev_xp;115 this-> command.txt.type = type;116 this-> command.txt.buf_xp = XPTR( local_cxy , buffer );117 this-> command.txt.count = count;114 this->txt_cmd.dev_xp = dev_xp; 115 this->txt_cmd.type = type; 116 this->txt_cmd.buf_xp = XPTR( local_cxy , buffer ); 117 this->txt_cmd.count = count; 118 118 119 119 // register client thread in waiting queue, activate server thread … … 123 123 124 124 txt_dmsg("\n[INFO] in %s : thread %x in process %x completes / error = %d\n", 125 __FUNCTION__ , this->trdid , this->process->pid , this-> command.txt.error );125 __FUNCTION__ , this->trdid , this->process->pid , this->txt_cmd.error ); 126 126 127 127 // return I/O operation status from calling thread descriptor 128 return this-> command.txt.error;128 return this->txt_cmd.error; 129 129 } 130 130 … … 157 157 assert( (dev_xp != XPTR_NULL) , __FUNCTION__ , "undefined TXT0 chdev descriptor" ); 158 158 159 // register command in calling thread 160 this-> command.txt.dev_xp = dev_xp;161 this-> command.txt.type = TXT_SYNC_WRITE;162 this-> command.txt.buf_xp = XPTR( local_cxy , buffer );163 this-> command.txt.count = count;159 // register command in calling thread descriptor 160 this->txt_cmd.dev_xp = dev_xp; 161 this->txt_cmd.type = TXT_SYNC_WRITE; 162 this->txt_cmd.buf_xp = XPTR( local_cxy , buffer ); 163 this->txt_cmd.count = count; 164 164 165 165 // get driver command function … … 168 168 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) ); 169 169 170 // call d irectly driver command170 // call driver function 171 171 cmd( XPTR( local_cxy , this ) ); 172 172 173 173 // return I/O operation status from calling thread descriptor 174 return this-> command.txt.error;174 return this->txt_cmd.error; 175 175 } 176 176 -
trunk/kernel/kern/chdev.h
r249 r279 169 169 xptr_t nic_tx[CONFIG_MAX_NIC_CHANNELS]; // external / multi-channels / shared 170 170 171 xptr_t icu[CONFIG_MAX_CLUSTERS]; // internal / single channel / shared172 171 xptr_t mmc[CONFIG_MAX_CLUSTERS]; // internal / single channel / shared 173 172 -
trunk/kernel/kern/cluster.c
r124 r279 77 77 // initialize cluster local parameters 78 78 cluster->cores_nr = info->cores_nr; 79 cluster->cores_in_kernel = info->cores_nr; // all cpus start in kernel mode79 cluster->cores_in_kernel = 0; 80 80 81 81 // initialize the lock protecting the embedded kcm allocator … … 130 130 // initialises RPC fifo 131 131 rpc_fifo_init( &cluster->rpc_fifo ); 132 cluster->rpc_threads = 0; 132 133 133 134 cluster_dmsg("\n[INFO] %s : RPC fifo inialized in cluster %x at cycle %d\n", -
trunk/kernel/kern/cluster.h
r188 r279 91 91 * This structure defines a cluster manager. 92 92 * It contains both global platform information, and cluster specific resources 93 * managed by the local kernel instance.93 * controled by the local kernel instance. 94 94 ******************************************************************************************/ 95 95 … … 99 99 100 100 // global parameters 101 102 101 uint32_t paddr_width; /*! numer of bits in physical address */ 103 102 uint32_t x_width; /*! number of bits to code x_size (can be 0) */ … … 109 108 110 109 // local parameters 111 112 110 uint32_t cores_nr; /*! number of cores in cluster */ 113 111 uint32_t cores_in_kernel; /*! number of cores currently in kernel mode */ 114 112 113 uint32_t ram_size; /*! physical memory size */ 114 uint32_t ram_base; /*! physical memory base (local address) */ 115 115 116 core_t core_tbl[CONFIG_MAX_LOCAL_CORES]; /*! embedded cores */ 116 117 118 list_entry_t dev_root; /*! root of list of devices in cluster */ 119 120 // memory allocators 117 121 ppm_t ppm; /*! embedded kernel page manager */ 118 122 khm_t khm; /*! embedded kernel heap manager */ 119 123 kcm_t kcm; /*! embedded kernel cache manager (for KCMs) */ 120 121 124 kcm_t * kcm_tbl[KMEM_TYPES_NR]; /*! pointers on allocated KCMs */ 122 125 123 uint32_t ram_size; /*! physical memory size */ 124 uint32_t ram_base; /*! physical memory base (local address) */ 125 126 rpc_fifo_t rpc_fifo; /*! cluster RPC fifo (shared) */ 127 list_entry_t devlist; /*! root of list of devices in cluster */ 128 126 // RPC 127 rpc_fifo_t rpc_fifo; /*! RPC fifo */ 128 uint32_t rpc_threads; /*! current number of RPC threads */ 129 130 // DQDT 129 131 int32_t pages_var; /*! pages number increment from last DQQT update */ 130 132 int32_t threads_var; /*! threads number increment from last DQDT update */ … … 132 134 dqdt_node_t dqdt_tbl[CONFIG_MAX_DQDT_DEPTH]; /*! embedded DQDT nodes */ 133 135 136 // Local process manager 134 137 pmgr_t pmgr; /*! embedded process manager */ 135 138 -
trunk/kernel/kern/core.c
r188 r279 50 50 core->usage = 0; 51 51 core->spurious_irqs = 0; 52 core->rpc_threads = 0;53 52 core->thread_idle = NULL; 54 53 core->fpu_owner = NULL; -
trunk/kernel/kern/core.h
r188 r279 56 56 uint32_t usage; /*! cumulated busy_percent (idle / total) */ 57 57 uint32_t spurious_irqs; /*! for instrumentation... */ 58 uint32_t rpc_threads; /*! current RPC threads number for this core */59 struct thread_s * thread_rpc; /*! pointer on current RPC thread descriptor */60 58 struct thread_s * thread_idle; /*! pointer on idle thread descriptor */ 61 59 struct thread_s * fpu_owner; /*! pointer on current FPU owner thread */ -
trunk/kernel/kern/kernel_init.c
r265 r279 28 28 #include <hal_special.h> 29 29 #include <hal_context.h> 30 #include <hal_irqmask.h> 30 31 #include <barrier.h> 31 32 #include <remote_barrier.h> … … 59 60 60 61 /////////////////////////////////////////////////////////////////////////////////////////// 61 // All the seglobal variables are replicated in all clusters.62 // All the following global variables are replicated in all clusters. 62 63 // They are initialised by the kernel_init() function. 63 64 // … … 135 136 " /_/ \\_\\ |______| |_| |_| \\_____/ |______/ |_| |_| |_| \\_\\ |_| |_| \n" 136 137 "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n" 137 "\n\n\t\t\t Version 0.0 : %d clusters / %d coresper cluster\n\n", nclusters , ncores );138 "\n\n\t\t\t Version 0.0 : %d cluster(s) / %d core(s) per cluster\n\n", nclusters , ncores ); 138 139 } 139 140 … … 274 275 } 275 276 276 if( local_cxy == 0 ) 277 kinit_dmsg("\n[INFO] %s created MMC chdev in cluster 0 at cycle %d\n", 278 __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() ); 277 kinit_dmsg("\n[INFO] %s created MMC in cluster %x / chdev = %x\n", 278 __FUNCTION__ , channel , local_cxy , chdev_ptr ); 279 279 } 280 280 /////////////////////////////// … … 301 301 chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr ); 302 302 303 kinit_dmsg("\n[INFO] %s created DMA[%d] chdev in cluster 0 at cycle %d\n",304 __FUNCTION__ , channel , (uint32_t)hal_time_stamp());303 kinit_dmsg("\n[INFO] %s created DMA[%d] in cluster %x / chdev = %x\n", 304 __FUNCTION__ , channel , local_cxy , chdev_ptr ); 305 305 } 306 306 } … … 433 433 } 434 434 435 kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x at cycle %d\n", 436 __FUNCTION__ , chdev_func_str( func ), channel, 437 local_cxy , (uint32_t)hal_time_stamp() ); 435 kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x / chdev = %x\n", 436 __FUNCTION__ , chdev_func_str( func ), channel , local_cxy , chdev ); 438 437 439 438 } // end if match … … 658 657 } 659 658 659 //////////////////////////////////////////////////////////////////////////////////////////// 660 // This function display on TXT0 the content of the external chdev directory, 661 // in the local cluster. 
662 //////////////////////////////////////////////////////////////////////////////////////////// 663 static void chdev_dir_display( ) 664 { 665 cxy_t iob_cxy = GET_CXY( chdev_dir.iob ); 666 chdev_t * iob_ptr = (chdev_t *)GET_PTR( chdev_dir.iob ); 667 xptr_t iob_base = hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) ); 668 669 cxy_t pic_cxy = GET_CXY( chdev_dir.pic ); 670 chdev_t * pic_ptr = (chdev_t *)GET_PTR( chdev_dir.pic ); 671 xptr_t pic_base = hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) ); 672 673 cxy_t txt0_cxy = GET_CXY( chdev_dir.txt[0] ); 674 chdev_t * txt0_ptr = (chdev_t *)GET_PTR( chdev_dir.txt[0] ); 675 xptr_t txt0_base = hal_remote_lwd( XPTR( txt0_cxy , &txt0_ptr->base ) ); 676 677 cxy_t txt1_cxy = GET_CXY( chdev_dir.txt[1] ); 678 chdev_t * txt1_ptr = (chdev_t *)GET_PTR( chdev_dir.txt[1] ); 679 xptr_t txt1_base = hal_remote_lwd( XPTR( txt1_cxy , &txt1_ptr->base ) ); 680 681 cxy_t txt2_cxy = GET_CXY( chdev_dir.txt[2] ); 682 chdev_t * txt2_ptr = (chdev_t *)GET_PTR( chdev_dir.txt[2] ); 683 xptr_t txt2_base = hal_remote_lwd( XPTR( txt2_cxy , &txt2_ptr->base ) ); 684 685 cxy_t ioc_cxy = GET_CXY( chdev_dir.ioc[0] ); 686 chdev_t * ioc_ptr = (chdev_t *)GET_PTR( chdev_dir.ioc[0] ); 687 xptr_t ioc_base = hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) ); 688 689 cxy_t fbf_cxy = GET_CXY( chdev_dir.fbf[0] ); 690 chdev_t * fbf_ptr = (chdev_t *)GET_PTR( chdev_dir.fbf[0] ); 691 xptr_t fbf_base = hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) ); 692 693 cxy_t nic_rx_cxy = GET_CXY( chdev_dir.nic_rx[0] ); 694 chdev_t * nic_rx_ptr = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] ); 695 xptr_t nic_rx_base = hal_remote_lwd( XPTR( nic_rx_cxy , &nic_rx_ptr->base ) ); 696 697 cxy_t nic_tx_cxy = GET_CXY( chdev_dir.nic_tx[0] ); 698 chdev_t * nic_tx_ptr = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] ); 699 xptr_t nic_tx_base = hal_remote_lwd( XPTR( nic_tx_cxy , &nic_tx_ptr->base ) ); 700 701 printk("\n*** external chdev directory in cluster %x\n" 702 " - iob = %l / base = %l\n" 703 " - pic = %l / base = %l\n" 704 " - txt[0] = %l / base = %l\n" 705 " - txt[1] = %l / base = %l\n" 706 " - txt[2] = %l / base = %l\n" 707 " - ioc[0] = %l / base = %l\n" 708 " - fbf[0] = %l / base = %l\n" 709 " - nic_rx[0] = %l / base = %l\n" 710 " - nic_tx[0] = %l / base = %l\n", 711 local_cxy, 712 chdev_dir.iob, iob_base, 713 chdev_dir.pic, pic_base, 714 chdev_dir.txt[0], txt0_base, 715 chdev_dir.txt[1], txt1_base, 716 chdev_dir.txt[2], txt2_base, 717 chdev_dir.ioc[0], ioc_base, 718 chdev_dir.fbf[0], fbf_base, 719 chdev_dir.nic_rx[0], nic_rx_base, 720 chdev_dir.nic_tx[0], nic_tx_base ); 721 } 722 660 723 /////////////////////////////////////////////////////////////////////////////////////////// 661 724 // This function is the entry point for the kernel initialisation. … … 683 746 684 747 error_t error; 748 uint32_t status; // running core status register 685 749 686 750 cxy_t io_cxy = info->io_cxy; … … 732 796 if( error ) 733 797 { 734 nolock_printk("\n[PANIC] in %s : illegal core identifiers"798 printk("\n[PANIC] in %s : illegal core identifiers" 735 799 " gid = %x / cxy = %x / lid = %d\n", 736 800 __FUNCTION__ , core_lid , core_cxy , core_lid ); … … 745 809 if( error ) 746 810 { 747 nolock_printk("\n[PANIC] in %s : cannot initialise cluster %x",811 printk("\n[PANIC] in %s : cannot initialise cluster %x", 748 812 __FUNCTION__ , local_cxy ); 749 813 hal_core_sleep(); … … 764 828 // STEP 2 : all CP0s initialize the process_zero descriptor. 765 829 // CP0 in cluster 0 initialises the IOPIC device. 
766 // all CP0s complete the distibuted LAPIC initialization.767 830 ///////////////////////////////////////////////////////////////////////////////// 768 831 … … 777 840 if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info ); 778 841 779 // all CP0s initialize their local LAPIC extension,780 if( core_lid == 0 ) lapic_init( info );781 782 842 //////////////////////////////////////////////////////////////////////////////// 783 843 if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ), … … 791 851 792 852 //////////////////////////////////////////////////////////////////////////////// 793 // STEP 3 : all CP0s initialize their local chdev descriptors 794 // (both internal devices and external devices). 853 // STEP 3 : all CP0s complete the distibuted LAPIC initialization. 854 // all CP0s initialize their internal chdev descriptors 855 // all CP0s initialize their local external chdev descriptors 795 856 //////////////////////////////////////////////////////////////////////////////// 857 858 // all CP0s initialize their local LAPIC extension, 859 if( core_lid == 0 ) lapic_init( info ); 796 860 797 861 // CP0 scan the internal (private) peripherals, … … 818 882 819 883 ///////////////////////////////////////////////////////////////////////////////// 820 // STEP 4 : Alls cores initialize their private IDLE thread. 884 // STEP 4 : All cores enable IPI (Inter Procesor Interrupt), 885 // Alh cores initialize IDLE thread. 821 886 // Only CP0 in cluster 0 creates the VFS root inode. 822 887 // It access the boot device to initialize the file system context. 823 888 ///////////////////////////////////////////////////////////////////////////////// 824 889 825 // all cores create idle thread descriptor 890 if( CONFIG_KINIT_DEBUG ) chdev_dir_display(); 891 892 // All cores enable the shared IPI channel 893 894 // @@@ 895 hal_set_ebase( 0x1000 ); 896 // @@@ 897 898 dev_pic_enable_ipi(); 899 hal_enable_irq( &status ); 900 901 kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n", 902 __FUNCTION__ , local_cxy , core_lid , hal_get_sr() ); 903 904 // all cores create the idle thread descriptor 826 905 error = thread_kernel_init( thread, 827 906 THREAD_IDLE, … … 831 910 if( error ) 832 911 { 833 nolock_printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",912 printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n", 834 913 __FUNCTION__ , local_cxy , core_lid ); 835 914 hal_core_sleep(); … … 860 939 fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc(); 861 940 862 nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,863 941 assert( (fatfs_ctx != NULL) , __FUNCTION__ , 942 "cannot create FATFS context in cluster 0\n" ); 864 943 865 944 // 2. access boot device to initialize FATFS context … … 883 962 &vfs_root_inode_xp ); // return 884 963 885 nolock_assert( (error == 0) , __FUNCTION__ ,886 964 assert( (error == 0) , __FUNCTION__ , 965 "cannot create VFS root inode\n" ); 887 966 888 967 // 5. 
initialize VFS context for FAT in cluster 0 … … 896 975 else 897 976 { 898 nolock_printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );977 printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ ); 899 978 hal_core_sleep(); 900 979 } … … 931 1010 fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc(); 932 1011 933 nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,934 1012 assert( (fatfs_ctx != NULL) , __FUNCTION__ , 1013 "cannot create FATFS context\n" ); 935 1014 936 1015 // get local pointer on VFS context for FATFS … … 965 1044 ///////////////////////////////////////////////////////////////////////////////// 966 1045 967 if( (core_lid == 0) && (local_cxy == 0) )1046 // if( (core_lid == 0) && (local_cxy == 0) ) 968 1047 kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n", 969 1048 __FUNCTION__, (uint32_t)hal_time_stamp()); … … 986 1065 devfs_ctx_t * devfs_ctx = devfs_ctx_alloc(); 987 1066 988 nolock_assert( (devfs_ctx != NULL) , __FUNCTION__ ,989 1067 assert( (devfs_ctx != NULL) , __FUNCTION__ , 1068 "cannot create DEVFS context in cluster IO\n"); 990 1069 991 1070 // register DEVFS root and external directories … … 993 1072 } 994 1073 1074 printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy ); 1075 995 1076 ///////////////////////////////////////////////////////////////////////////////// 996 1077 if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ), … … 999 1080 ///////////////////////////////////////////////////////////////////////////////// 1000 1081 1001 if( (core_lid == 0) && (local_cxy == 0) )1082 // if( (core_lid == 0) && (local_cxy == 0) ) 1002 1083 kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n", 1003 1084 __FUNCTION__, (uint32_t)hal_time_stamp()); … … 1071 1152 print_banner( (info->x_size * info->y_size) , info->cores_nr ); 1072 1153 1073 kinit_dmsg("\n\n*** memory fooprint of main kernet objects ***\n"1154 kinit_dmsg("\n\n*** memory fooprint for main kernet objects ***\n\n" 1074 1155 " - thread descriptor : %d bytes\n" 1075 1156 " - process descriptor : %d bytes\n" … … 1114 1195 } 1115 1196 1116 // each core activates its private PTIIRQ1197 // each core activates its private TICK IRQ 1117 1198 dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD ); 1118 1199 -
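Each numbered STEP of kernel_init() above ends with the same two-level rendezvous, visible in the repeated barrier fragments: CP0 of every cluster meets the other CP0s on a barrier hosted by the I/O cluster, then all cores of each cluster meet on a local barrier. A sketch of that recurring idiom; the participant counts are assumptions, since they are elided in the diff.

// CP0s of all clusters synchronize through the I/O cluster barrier
if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
                                    (info->x_size * info->y_size) );

// then all cores of the local cluster synchronize locally (assumed helper)
barrier_wait( &local_barrier , info->cores_nr );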
trunk/kernel/kern/printk.c
r246 r279 401 401 } 402 402 403 ////////////////////////////////////////404 void nolock_printk( char * format , ...)405 {406 va_list args;407 408 // call kernel_printf on TXT0, in busy waiting mode409 va_start( args , format );410 kernel_printf( 0 , 1 , format , &args );411 va_end( args );412 }413 414 403 /////////////////////////////////////////// 415 404 inline void assert( bool_t condition, … … 424 413 } 425 414 426 //////////////////////////////////////////////////427 inline void nolock_assert( bool_t condition,428 const char * function_name,429 char * string )430 {431 if( condition == false )432 {433 nolock_printk("\n[PANIC] in %s : %s\n" , function_name , string );434 hal_core_sleep();435 }436 }437 438 439 415 440 416 // Local Variables: -
trunk/kernel/kern/printk.h
r188 r279 74 74 75 75 /********************************************************************************** 76 * This function displays a formated string on the kernel terminal TXT0,77 * using a busy waiting policy: It calls directly the relevant TXT driver,78 * without taking the the lock protecting exclusive access to TXT0 terminal.79 **********************************************************************************80 * @ format : formated string.81 *********************************************************************************/82 void nolock_printk( char* format, ... );83 84 /**********************************************************************************85 76 * This function displays a "PANIC" message and force the calling core in 86 77 * sleeping mode if a Boolean condition is false. … … 95 86 char * string ); 96 87 97 /**********************************************************************************98 * This function displays a "PANIC" message and force the calling core in99 * sleeping mode if a Boolean condition is false,100 * without taking the the lock protecting exclusive access to TXT0 terminal.101 **********************************************************************************102 * @ condition : condition that must be true.103 * @ function_name : name of the calling function.104 * @ string : error message if condition is false.105 *********************************************************************************/106 inline void nolock_assert( bool_t condition,107 const char * function_name,108 char * string );109 110 88 /////////////////////////////////////////////////////////////////////////////////// 111 89 // Conditionnal debug macros … … 215 193 216 194 #if CONFIG_KINIT_DEBUG 217 #define kinit_dmsg(...) nolock_printk(__VA_ARGS__)195 #define kinit_dmsg(...) printk(__VA_ARGS__) 218 196 #else 219 197 #define kinit_dmsg(...) -
trunk/kernel/kern/process.c
r204 r279 90 90 pid_t parent_pid; 91 91 92 process_dmsg("\n[INFO] %s : enters for process %x in cluster %x / parent_xp = %l\n",93 __FUNCTION__ , pid , parent_xp);92 process_dmsg("\n[INFO] %s : enters for process %x in cluster %x\n", 93 __FUNCTION__ , pid , local_cxy ); 94 94 95 95 // get parent process cluster, local pointer, and pid … … 198 198 local_process->ref_xp = reference_process_xp; 199 199 200 process_dmsg("\n[INFO] %s : enter for process %x in cluster %x\n", 201 __FUNCTION__ , local_process->pid ); 202 200 203 // reset children list root (not used in a process descriptor copy) 201 204 xlist_root_init( XPTR( local_cxy , &local_process->children_root ) ); … … 229 232 230 233 hal_fence(); 234 235 process_dmsg("\n[INFO] %s : exit for process %x in cluster %x\n", 236 __FUNCTION__ , local_process->pid ); 231 237 232 238 return 0; -
trunk/kernel/kern/rpc.c
r265 r279 101 101 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 102 102 103 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 104 103 105 // initialise RPC descriptor header 104 106 rpc_desc_t rpc; … … 115 117 *error = (error_t)rpc.args[0]; 116 118 *ppn = (uint32_t)rpc.args[1]; 119 120 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 117 121 } 118 122 … … 153 157 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 154 158 159 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 160 155 161 // initialise RPC descriptor header 156 162 rpc_desc_t rpc; … … 167 173 *pid = (pid_t)rpc.args[1]; 168 174 *error = (error_t)rpc.args[2]; 175 176 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 169 177 } 170 178 … … 204 212 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 205 213 214 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 215 206 216 // initialise RPC descriptor header 207 217 rpc_desc_t rpc; … … 217 227 // get output arguments from RPC descriptor 218 228 *error = (error_t)rpc.args[1]; 229 230 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 219 231 } 220 232 … … 256 268 assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ , 257 269 "caller must be reference process cluster\n"); 270 271 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 258 272 259 273 // get local process index in reference cluster … … 282 296 if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc ); 283 297 } 298 299 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 284 300 } 285 301 … … 327 343 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 328 344 345 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 346 329 347 // initialise RPC descriptor header 330 348 rpc_desc_t rpc; … … 344 362 *thread_xp = (xptr_t)rpc.args[4]; 345 363 *error = (error_t)rpc.args[5]; 364 365 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 346 366 } 347 367 … … 405 425 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 406 426 427 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 428 407 429 // initialise RPC descriptor header 408 430 rpc_desc_t rpc; … … 421 443 *thread_xp = (xptr_t)rpc.args[3]; 422 444 *error = (error_t)rpc.args[4]; 445 446 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 423 447 } 424 448 … … 463 487 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 464 488 489 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 490 465 491 // initialise RPC descriptor header 466 492 rpc_desc_t rpc; … … 474 500 // register RPC request in remote RPC fifo 475 501 rpc_send_sync( cxy , &rpc ); 502 503 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 476 504 } 477 505 … … 513 541 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 514 542 543 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 544 515 545 // initialise RPC descriptor header 516 546 rpc_desc_t rpc; … … 534 564 *inode_xp = (xptr_t)rpc.args[8]; 535 565 *error = (error_t)rpc.args[9]; 566 567 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 536 568 } 537 569 … … 590 622 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 591 623 624 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 625 592 626 // initialise RPC descriptor header 593 627 rpc_desc_t rpc; … … 600 634 // register RPC request in remote RPC fifo (blocking function) 601 635 rpc_send_sync( cxy , &rpc ); 636 637 rpc_dmsg("\n[INFO] %s : completed\n", 
__FUNCTION__ ); 602 638 } 603 639 … … 632 668 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 633 669 670 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 671 634 672 // initialise RPC descriptor header 635 673 rpc_desc_t rpc; … … 648 686 *dentry_xp = (xptr_t)rpc.args[3]; 649 687 *error = (error_t)rpc.args[4]; 688 689 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 650 690 } 651 691 … … 695 735 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 696 736 737 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 738 697 739 // initialise RPC descriptor header 698 740 rpc_desc_t rpc; … … 705 747 // register RPC request in remote RPC fifo (blocking function) 706 748 rpc_send_sync( cxy , &rpc ); 749 750 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 707 751 } 708 752 … … 737 781 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 738 782 783 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 784 739 785 // initialise RPC descriptor header 740 786 rpc_desc_t rpc; … … 752 798 *file_xp = (xptr_t)rpc.args[2]; 753 799 *error = (error_t)rpc.args[3]; 800 801 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 754 802 } 755 803 … … 790 838 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 791 839 840 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 841 792 842 // initialise RPC descriptor header 793 843 rpc_desc_t rpc; … … 800 850 // register RPC request in remote RPC fifo (blocking function) 801 851 rpc_send_sync( cxy , &rpc ); 852 853 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 802 854 } 803 855 … … 831 883 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 832 884 885 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 886 833 887 // initialise RPC descriptor header 834 888 rpc_desc_t rpc; … … 846 900 // get output values from RPC descriptor 847 901 *error = (error_t)rpc.args[3]; 902 903 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 848 904 } 849 905 … … 889 945 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 890 946 947 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 948 891 949 // initialise RPC descriptor header 892 950 rpc_desc_t rpc; … … 902 960 // get output values from RPC descriptor 903 961 *error = (error_t)rpc.args[1]; 962 963 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 904 964 } 905 965 … … 938 998 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 939 999 1000 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1001 940 1002 // initialise RPC descriptor header 941 1003 rpc_desc_t rpc; … … 954 1016 *cluster = (uint32_t)rpc.args[3]; 955 1017 *error = (error_t)rpc.args[4]; 1018 1019 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 956 1020 } 957 1021 … … 994 1058 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 995 1059 1060 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1061 996 1062 // initialise RPC descriptor header 997 1063 rpc_desc_t rpc; … … 1008 1074 // get output argument from rpc descriptor 1009 1075 *vseg_xp = rpc.args[2]; 1076 1077 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 1010 1078 } 1011 1079 … … 1050 1118 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1051 1119 1120 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1121 1052 1122 // initialise RPC descriptor header 1053 1123 rpc_desc_t rpc; … … 1066 1136 *ppn = (ppn_t)rpc.args[3]; 1067 1137 *error = 
(error_t)rpc.args[4]; 1138 1139 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 1068 1140 } 1069 1141 … … 1105 1177 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1106 1178 1179 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1180 1107 1181 // initialise RPC descriptor header 1108 1182 rpc_desc_t rpc; … … 1118 1192 // get output arguments from RPC descriptor 1119 1193 *buf_xp = (xptr_t)rpc.args[1]; 1194 1195 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 1120 1196 } 1121 1197 … … 1152 1228 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1153 1229 1230 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1231 1154 1232 // initialise RPC descriptor header 1155 1233 rpc_desc_t rpc; … … 1163 1241 // register RPC request in remote RPC fifo 1164 1242 rpc_send_sync( cxy , &rpc ); 1243 1244 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 1165 1245 } 1166 1246 … … 1199 1279 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1200 1280 1281 rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ ); 1282 1201 1283 // initialise RPC descriptor header 1202 1284 rpc_desc_t rpc; … … 1217 1299 // get output values from RPC descriptor 1218 1300 *error = (error_t)rpc.args[6]; 1301 1302 rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ ); 1219 1303 } 1220 1304 … … 1262 1346 rpc_desc_t * rpc ) 1263 1347 { 1264 thread_t * this = CURRENT_THREAD;1265 1348 uint32_t cores; 1266 1349 error_t error; … … 1268 1351 reg_t sr_save; 1269 1352 1270 // get client CPU and cluster coordinates 1271 cxy_t client_cxy = local_cxy; 1272 lid_t client_lid = CURRENT_CORE->lid; 1353 thread_t * this = CURRENT_THREAD; 1354 1355 rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n", 1356 __FUNCTION__ , local_cxy , server_cxy ); 1273 1357 1274 1358 // allocate and initialise an extended pointer on the RPC descriptor 1275 xptr_t xp = XPTR( client_cxy , rpc );1276 1277 // get local pointer on rpc_fifo in remote cluster with the1278 // assumption that addresses are identical in all clusters1359 xptr_t desc_xp = XPTR( local_cxy , rpc ); 1360 1361 // get local pointer on rpc_fifo in remote cluster, with the 1362 // assumption that rpc_fifo pddresses are identical in all clusters 1279 1363 rpc_fifo_t * rf = &LOCAL_CLUSTER->rpc_fifo; 1280 1364 … … 1284 1368 { 1285 1369 error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ), 1286 (uint64_t *)&xp,1370 (uint64_t )desc_xp, 1287 1371 &first ); 1288 1372 1289 1373 if ( error ) 1290 1374 { 1291 printk("\n[WARNING] %s : core %d in cluster %x cannot post RPC to cluster %x\n", 1292 __FUNCTION__ , client_lid , client_cxy , server_cxy ); 1375 printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n", 1376 __FUNCTION__ , local_cxy , server_cxy ); 1377 1293 1378 if( thread_can_yield() ) sched_yield(); 1379 } 1380 else 1381 { 1294 1382 } 1295 1383 } 1296 1384 while( error ); 1297 1385 1298 rpc_dmsg("\n[INFO] %s on core %d in cluster %x sent RPC %p to cluster %x\n",1299 __FUNCTION__ , client_lid , client_cxy , rpc , server_cxy);1386 rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x\n", 1387 __FUNCTION__ , local_cxy , server_cxy , first ); 1300 1388 1301 // send IPI if this is the first RPC in remote FIFO 1302 // and no CPU is in kernel mode in server cluster. 1303 // the selected CPU in server has the same lid as the client CPU. 
1389 // send IPI to remote CP0, if this is the first RPC in remote FIFO, 1390 // and there is no CPU is in kernel mode in server cluster. 1304 1391 if( first ) 1305 1392 { … … 1309 1396 if( cores == 0 ) // no core in kernel mode in server 1310 1397 { 1311 dev_pic_send_ipi( server_cxy , client_lid);1312 1313 rpc_dmsg("\n[INFO] %s : core %d in cluster %x send IPI to core %d in cluster%x\n",1314 __FUNCTION__, client_lid , client_cxy , client_lid, server_cxy );1398 dev_pic_send_ipi( server_cxy , 0 ); 1399 1400 rpc_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n", 1401 __FUNCTION__, local_cxy , server_cxy ); 1315 1402 } 1316 1403 } 1317 1404 1318 // activate preemptionto allow incoming RPC and avoid deadlock1405 // enable IRQs to allow incoming RPC and avoid deadlock 1319 1406 if( this->type == THREAD_RPC ) hal_enable_irq( &sr_save ); 1320 1407 1321 // the sending thread poll the response slot until RPC completed 1408 // the server thread poll the response slot until RPC completed 1409 // TODO this could be replaced by a descheduling policy... [AG] 1322 1410 while( 1 ) 1323 1411 { … … 1325 1413 } 1326 1414 1327 // restore preemption1415 // restore IRQs 1328 1416 if( this->type == THREAD_RPC ) hal_restore_irq( sr_save ); 1417 1418 rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n", 1419 __FUNCTION__ , local_cxy , server_cxy ); 1329 1420 1330 1421 } // end rpc_send_sync() … … 1344 1435 } 1345 1436 1346 ///////////////////////////////////////////// ///1347 error_trpc_execute_all( rpc_fifo_t * rpc_fifo )1437 ///////////////////////////////////////////// 1438 void rpc_execute_all( rpc_fifo_t * rpc_fifo ) 1348 1439 { 1349 1440 xptr_t xp; // extended pointer on RPC descriptor … … 1353 1444 rpc_desc_t * desc; // pointer on RPC descriptor 1354 1445 uint32_t index; // RPC index 1355 uint32_t expected; // number of expected responses1356 1446 cxy_t client_cxy; // client cluster identifier 1357 1447 error_t error; … … 1370 1460 if ( error == 0 ) // One RPC request successfully extracted from RPC_FIFO 1371 1461 { 1372 rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n" 1462 rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n", 1373 1463 __FUNCTION__ , this->trdid , core->lid , local_cxy , count ); 1374 1464 … … 1377 1467 desc = (rpc_desc_t *)GET_PTR( xp ); 1378 1468 1379 // get rpc index and expected responsesfrom RPC descriptor1469 // get rpc index from RPC descriptor 1380 1470 index = hal_remote_lw( XPTR( client_cxy , &desc->index ) ); 1381 expected = hal_remote_lw( XPTR( client_cxy , &desc->response ) );1382 1471 1383 1472 // call the relevant server function … … 1388 1477 1389 1478 // notify RPC completion as required 1390 if( expected == 1 ) hal_remote_sw( XPTR(client_cxy,&desc->response) , 0 ); 1391 if( expected > 1 ) hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 ); 1479 hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 ); 1392 1480 } 1393 1481 … … 1400 1488 (count > CONFIG_RPC_PENDING_MAX) ) break; 1401 1489 } 1402 while( 1 ) 1403 1404 rpc_dmsg("\n[INFO] %s running on core %d in cluster %x exit\n" 1405 __FUNCTION__ , CURRENT_CORE->lid , local_cxy ); 1406 1490 while( 1 ); 1491 1407 1492 // update RPC_FIFO global counter 1408 1493 rpc_fifo->count += count; 1409 1494 1410 return 0;1411 1495 } // end rpc_execute_all() 1412 1496 … … 1422 1506 reg_t sr_save; 1423 1507 1508 1424 1509 this = CURRENT_THREAD; 1425 1510 core = this->core; … … 1427 1512 found = false; 1428 1513 1429 // 
calling thread must be the RPC_FIFO owner 1430 if( this->trdid != rpc_fifo->owner ) 1431 { 1432 printk("\n[PANIC] in %s : calling thread is not RPC_FIFO owner\n", __FUNCTION__ ); 1433 hal_core_sleep(); 1434 } 1514 assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ , 1515 "calling thread is not RPC_FIFO owner\n" ); 1435 1516 1436 1517 // makes the calling thread not preemptable … … 1443 1524 { 1444 1525 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 1445 if( (thread->type == THREAD_RPC) && (thread->blocked == 1526 if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) ) 1446 1527 { 1447 1528 found = true; … … 1453 1534 { 1454 1535 thread->blocked = 0; 1536 1537 rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n", 1538 __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() ); 1455 1539 } 1456 1540 else // create a new RPC thread … … 1469 1553 } 1470 1554 1471 rpc_dmsg("\n[INFO] %s createsRPC thread %x on core %x in cluster %x at cycle %d\n",1555 rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n", 1472 1556 __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() ); 1473 1557 1474 1558 // update core descriptor counter 1475 hal_atomic_add( & core->rpc_threads , 1 );1559 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 1476 1560 } 1477 1561 1478 1562 // update owner in rpc_fifo 1479 1563 rpc_fifo->owner = thread->trdid; 1480 1481 rpc_dmsg ("\n[INFO] %s activates RPC thread %x on core %x in cluster %x at cycle %d\n",1482 __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );1483 1564 1484 1565 // current thread deschedules / RPC thread start execution … … 1506 1587 } 1507 1588 1508 // calling thread tries to take the light lock, 1509 // and activates an RPC thread if success 1589 // try to take the light lock, and activates an RPC thread if success 1510 1590 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) ) 1511 1591 { … … 1543 1623 1544 1624 // this infinite loop is not preemptable 1545 // the RPC thread deschedule when the RPC_FIFO is empty1625 // the RPC thread deschedule only when the RPC_FIFO is empty 1546 1626 while(1) 1547 1627 { … … 1561 1641 1562 1642 1563 // suicide if too much RPC threads for this core1564 if( this->core->rpc_threads >CONFIG_RPC_THREADS_MAX )1643 // block and deschedule or sucide 1644 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 1565 1645 { 1566 1646 rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n", … … 1568 1648 1569 1649 // update core descriptor counter 1570 hal_atomic_add( & this->core->rpc_threads , -1 );1650 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 1571 1651 1572 1652 // suicide 1573 1653 thread_exit(); 1574 1654 } 1575 1576 // block and deschedule 1577 rpc_dmsg("\n[INFO] RPC thread %x deschedule on core %d in cluster %x at cycle %d\n", 1578 this->trdid , this->core->lid , local_cxy , hal_get_cycles() ); 1579 1580 thread_block( this , THREAD_BLOCKED_IDLE ); 1581 sched_yield(); 1582 1583 rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n", 1584 this->trdid , this->core->lid , local_cxy , hal_get_cycles() ); 1585 } 1655 else 1656 { 1657 rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n", 1658 this->trdid , this->core->lid , local_cxy , hal_get_cycles() ); 1659 1660 thread_block( this , THREAD_BLOCKED_IDLE ); 1661 sched_yield(); 1662 1663 rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at 
cycle %d\n", 1664 this->trdid , this->core->lid , local_cxy , hal_get_cycles() ); 1665 } 1666 } // end while 1586 1667 } // end rpc_thread_func() 1587 1668 -
trunk/kernel/kern/rpc.h
r265 r279 158 158 * This function is the entry point for RPC handling on the server side. 159 159 * It can be executed by any thread running (in kernel mode) on any core. 160 * It first checks the core private RPC fifo, an then the cluster shared RPC fifo.161 * It calls the rpc_activate_thread() function to activate a dedicated RPC thread.162 *********************************************************************************** 163 * @ returns true if at least one RPC found/ false otherwise.160 * It checks the RPC fifo, try to take the light-lock and activates (or creates) 161 * an RPC thread in case of success. 162 *********************************************************************************** 163 * @ returns true if success / false otherwise. 164 164 **********************************************************************************/ 165 165 bool_t rpc_check(); … … 170 170 *********************************************************************************** 171 171 * @ rpc_fifo : pointer on the local RPC fifo 172 * @ returns 0 if success 173 **********************************************************************************/ 174 error_t rpc_execute_all( rpc_fifo_t * rpc_fifo ); 172 **********************************************************************************/ 173 void rpc_execute_all( rpc_fifo_t * rpc_fifo ); 175 174 176 175 /********************************************************************************** -
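The updated rpc_check() comment above describes a light-lock protocol: the owner field of the RPC FIFO doubles as a lock, and the first thread to atomically install its trdid becomes responsible for waking, or creating, the local RPC server thread. A minimal sketch, assuming a local_fifo_is_empty() helper (hypothetical) and the rpc_activate_thread() function named in the removed comment:

bool_t rpc_check()
{
    thread_t   * this     = CURRENT_THREAD;
    rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

    // nothing to do if no RPC request is pending
    if( local_fifo_is_empty( &rpc_fifo->fifo ) ) return false;    // hypothetical

    // try to take the light lock; the winner hands the FIFO
    // to a dedicated RPC thread and reports success
    if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    {
        rpc_activate_thread( rpc_fifo );
        return true;
    }
    return false;    // another thread already owns the FIFO
}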
trunk/kernel/kern/scheduler.c
r278 r279 
41 41 sched->k_threads_nr = 0;
42 42 
43 sched->current = NULL;
44 sched->idle = NULL;
45 sched->u_last = NULL;
46 sched->k_last = NULL;
43 sched->current = CURRENT_THREAD;
44 sched->idle = NULL; // initialized in kernel_init()
45 sched->u_last = NULL; // initialized in sched_register_thread()
46 sched->k_last = NULL; // initialized in sched_register_thread()
47 47 
48 48 // initialise threads lists
… …
62 62 spinlock_lock( &sched->lock );
63 63 
64 // register thread
65 64 if( type == THREAD_USER )
66 65 {
66 // register thread in scheduler user list
67 67 list_add_last( &sched->u_root , &thread->sched_list );
68 68 sched->u_threads_nr++;
69 
70 // initialize u_last field if first user thread
71 if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
69 72 }
70 73 else // kernel thread
71 74 {
75 // register thread in scheduler kernel list
72 76 list_add_last( &sched->k_root , &thread->sched_list );
73 77 sched->k_threads_nr++;
78 
79 // initialize k_last field if first kernel thread
80 if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
74 81 }
75 82 
… …
89 96 spinlock_lock( &sched->lock );
90 97 
91 // remove thread
92 98 if( type == THREAD_USER )
93 99 {
100 // remove thread from user list
94 101 list_unlink( &thread->sched_list );
95 102 sched->u_threads_nr--;
103 
104 // reset the u_last field if list empty
105 if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
96 106 }
97 107 else // kernel thread
98 108 {
109 // remove thread from kernel list
99 110 list_unlink( &thread->sched_list );
100 111 sched->k_threads_nr--;
112 
113 // reset the k_last field if list empty
114 if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
101 115 }
102 116 
… …
140 154 list_entry_t * last;
141 155 
142 // first scan the kernel threads
143 last = sched->k_last;
144 current = sched->k_last;
145 do
146 {
147 // get next entry in kernel list
148 current = list_next( &sched->k_root , current );
149 
150 // skip the list root that does not contain a thread
151 if( current == NULL ) continue;
152 
153 // get thread pointer
154 thread = LIST_ELEMENT( current , thread_t , sched_list );
155 
156 // return thread if not blocked
157 if( thread->blocked == 0 )
156 // first : scan the kernel threads list,
157 // only if this list is not empty
158 if( list_is_empty( &sched->k_root ) == false )
159 {
160 last = sched->k_last;
161 current = sched->k_last;
162 do
158 163 {
159 // release lock
160 spinlock_unlock( &sched->lock );
161 return thread;
164 // get next entry in kernel list
165 current = list_next( &sched->k_root , current );
166 
167 // skip the root that does not contain a thread
168 if( current == NULL ) current = sched->k_root.next;
169 
170 // get thread pointer for this entry
171 thread = LIST_ELEMENT( current , thread_t , sched_list );
172 
173 // return thread if runnable
174 if( thread->blocked == 0 )
175 {
176 // release lock
177 spinlock_unlock( &sched->lock );
178 return thread;
179 }
162 180 }
163 }
164 while( current != last );
165 
166 // second scan the user threads
167 last = sched->u_last;
168 current = sched->u_last;
169 do
170 {
171 // get next entry in user list
172 current = list_next( &sched->u_root , current );
173 
174 // skip the list root that does not contain a thread
175 if( current == NULL ) continue;
176 
177 // get thread pointer
178 thread = LIST_ELEMENT( current , thread_t , sched_list );
179 
180 // return thread if not blocked
181 if( thread->blocked == 0 )
181 while( current != last );
182 }
183 
184 // second : scan the user threads list,
185 // only if this list is not empty
186 if( list_is_empty( &sched->u_root ) == false )
187 {
188 last = sched->u_last;
189 current = sched->u_last;
190 do
182 191 {
183 // release lock
184 spinlock_unlock( &sched->lock );
185 return thread;
192 // get next entry in user list
193 current = list_next( &sched->u_root , current );
194 
195 // skip the root that does not contain a thread
196 if( current == NULL ) current = sched->u_root.next;
197 
198 // get thread pointer for this entry
199 thread = LIST_ELEMENT( current , thread_t , sched_list );
200 
201 // return thread if runnable
202 if( thread->blocked == 0 )
203 {
204 // release lock
205 spinlock_unlock( &sched->lock );
206 return thread;
207 }
186 208 }
187 }
188 while( current != last );
209 while( current != last );
210 }
189 211 
190 212 // release lock
191 213 spinlock_unlock( &sched->lock );
192 214 
193 // third , return idle thread if no runnable thread
215 // third : return idle thread if no runnable thread
194 216 return sched->idle;
195 217 
… …
234 256 thread_t * current = CURRENT_THREAD;
235 257 core_t * core = current->core;
258 scheduler_t * sched = &core->scheduler;
236 259 
237 260 if( thread_can_yield() == false )
… …
265 288 __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
266 289 
267 // switch contexts if new thread
290 // switch contexts and update scheduler state if new thread
268 291 if( next != current )
269 292 {
270 293 hal_cpu_context_save( current );
271 294 hal_cpu_context_restore( next );
295 
296 if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
297 else sched->k_last = &current->sched_list;
298 
299 sched->current = next;
272 300 }
273 301 
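The new sched_select() resumes its round-robin scan at the entry after the last executed thread, steps explicitly past the list root (a sentinel that carries no thread) instead of the old `continue`, and falls back to the idle thread when nothing is runnable. A standalone model of that policy in plain C (the types entry_t, select_next and the blocked flag are illustrative, not the ALMOS-MKH list API):

    // Standalone model of the sched_select() round-robin policy :
    // scan a circular list starting after the last served entry,
    // skip the root sentinel, return the first runnable entry.
    #include <stdio.h>
    #include <stddef.h>

    typedef struct entry_s
    {
        struct entry_s * next;
        int              blocked;   // 0 means runnable
        int              id;
    } entry_t;

    // root is a sentinel : it carries no thread and must be skipped
    static entry_t * select_next( entry_t * root, entry_t * last )
    {
        entry_t * cur = last;
        do
        {
            cur = cur->next;
            if( cur == root ) cur = cur->next;   // step past the sentinel
            if( cur->blocked == 0 ) return cur;  // first runnable wins
        }
        while( cur != last );
        return NULL;                             // caller falls back to idle
    }

    int main( void )
    {
        entry_t root, a = { .id = 1, .blocked = 1 },
                      b = { .id = 2, .blocked = 0 },
                      c = { .id = 3, .blocked = 0 };
        root.next = &a; a.next = &b; b.next = &c; c.next = &root;
        root.blocked = 1;

        entry_t * last = &b;                 // b ran last : scan starts at c
        entry_t * next = select_next( &root, last );
        printf( "selected entry %d\n", next ? next->id : -1 );  // prints 3
        return 0;
    }

The do/while starting at last->next guarantees that every entry, including the previously running one, is examined exactly once per scan, which is what makes the fallback to idle safe.
-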
trunk/kernel/kern/scheduler.h
r14 r279 
34 34 struct thread_s;
35 35 
36 /********************************************************************************************* **
36 /*********************************************************************************************
37 37 * This structure defines the scheduler associated to a given core.
38 38 * WARNING : the idle thread is executed when there is no runnable thread in the list
39 39 * of attached threads, but is NOT part of the list of attached threads.
40 ******************************************************************************************** **/
40 ********************************************************************************************/
41 41 
42 42 typedef struct scheduler_s
43 43 {
44 spinlock_t lock; /*! readlock protecting lists of threads
45 uint16_t u_threads_nr; /*! total numbre of attached user threads
46 uint16_t k_threads_nr; /*! total number of attached kernel threads
47 list_entry_t u_root; /*! root of list of user threads for this scheduler
48 list_entry_t k_root; /*! root of list of kernel threads for this scheduler
49 list_entry_t * u_last; /*! pointer on list_entry for last executed kernel thread */
50 list_entry_t * k_last; /*! pointer on list entry for last executed user thread */
51 struct thread_s * idle; /*! pointer on idle thread
52 struct thread_s * current; /*! pointer on current running thread
44 spinlock_t lock; /*! readlock protecting lists of threads */
45 uint16_t u_threads_nr; /*! total number of attached user threads */
46 uint16_t k_threads_nr; /*! total number of attached kernel threads */
47 list_entry_t u_root; /*! root of list of user threads for this scheduler */
48 list_entry_t k_root; /*! root of list of kernel threads for this scheduler */
49 list_entry_t * u_last; /*! pointer on list_entry for last executed u_thread */
50 list_entry_t * k_last; /*! pointer on list entry for last executed k_thread */
51 struct thread_s * idle; /*! pointer on idle thread */
52 struct thread_s * current; /*! pointer on current running thread */
53 53 }
54 54 scheduler_t;
55 55 
56 /********************************************************************************************* **
56 /*********************************************************************************************
57 57 * This function initialises the scheduler for a given core.
58 ******************************************************************************************** **/
58 ********************************************************************************************/
59 59 void sched_init( struct core_s * core );
60 60 
61 /********************************************************************************************* **
61 /*********************************************************************************************
62 62 * This function registers a new thread in a given core scheduler.
63 ********************************************************************************************* **
63 *********************************************************************************************
64 64 * @ core : local pointer on the core descriptor.
65 65 * @ thread : local pointer on the thread descriptor.
66 ******************************************************************************************** **/
66 ********************************************************************************************/
67 67 void sched_register_thread( struct core_s * core,
68 68 struct thread_s * thread );
69 69 
70 /********************************************************************************************* **
70 /*********************************************************************************************
71 71 * This function removes a thread from the set of threads attached to a given core.
72 ********************************************************************************************* **
72 *********************************************************************************************
73 73 * @ thread : local pointer on the thread descriptor.
74 ******************************************************************************************** **/
74 ********************************************************************************************/
75 75 void sched_remove_thread( struct thread_s * thread );
76 76 
77 /********************************************************************************************* **
77 /*********************************************************************************************
78 78 * This function handles pending signals for all registered threads, and tries to make
79 79 * a context switch for the core running the calling thread.
… …
82 82 * - If there is no other runnable thread, the calling thread continues execution.
83 83 * - If there is no runnable thread, the idle thread is executed.
84 ******************************************************************************************** **/
84 ********************************************************************************************/
85 85 void sched_yield();
86 86 
87 /********************************************************************************************* **
87 /*********************************************************************************************
88 88 * This function handles pending signals for all registered threads, and makes
89 89 * a context switch to the thread defined by the <thread> argument.
90 90 * If the selected thread is not attached to the same core as the calling thread,
91 91 * or is blocked, it causes a kernel panic.
92 ********************************************************************************************* **
92 *********************************************************************************************
93 93 * @ new : local pointer on the thread to run.
94 ******************************************************************************************** **/
94 ********************************************************************************************/
95 95 void sched_switch_to( struct thread_s * new );
96 96 
97 /********************************************************************************************* **
97 /*********************************************************************************************
98 98 * This function scans all threads attached to a given core scheduler, and executes
99 99 * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal.
100 ********************************************************************************************* **
100 *********************************************************************************************
101 101 * @ core : local pointer on the core descriptor.
102 ******************************************************************************************** **/
102 ********************************************************************************************/
103 103 void sched_handle_signals( struct core_s * core );
104 104 
105 /********************************************************************************************* **
105 /*********************************************************************************************
106 106 * This function is used by the scheduler of a given core to actually kill a thread that has
107 107 * the SIG_KILL signal set (following a thread_exit() or a thread_kill() event).
… …
110 110 * - It removes the thread from the scheduler.
111 111 * - It releases physical memory allocated for thread descriptor.
112 ********************************************************************************************* **
112 *********************************************************************************************
113 113 * @ thread : local pointer on the thread descriptor.
114 ******************************************************************************************** **/
114 ********************************************************************************************/
115 115 void sched_kill_thread( struct thread_s * thread );
116 116 
117 /********************************************************************************************* **
117 /*********************************************************************************************
118 118 * This function does NOT modify the scheduler state.
119 119 * It just selects a thread in the list of attached threads, implementing the following policy:
… …
123 123 * the last executed one, and returns the first runnable found (can be the current thread).
124 124 * 3) if no runnable thread found, it returns the idle thread.
125 ********************************************************************************************* **
125 *********************************************************************************************
126 126 * @ core : local pointer on the core descriptor.
127 127 * @ returns pointer on selected thread descriptor
128 ******************************************************************************************** **/
128 ********************************************************************************************/
129 129 struct thread_s * sched_select( struct core_s * core );
130 130 
131 /********************************************************************************************* **
131 /*********************************************************************************************
132 132 * This function scans the list of kernel threads to find an idle (blocked) RPC thread.
133 ********************************************************************************************* **
133 *********************************************************************************************
134 134 * @ core : local pointer on the core descriptor.
135 135 * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
136 ******************************************************************************************** **/
136 ********************************************************************************************/
137 137 struct thread_s * sched_get_rpc_thead( struct core_s * core );
138 138 
-
trunk/kernel/kern/thread.h
r174 r279 
213 213 
214 214 uint32_t dev_channel; /*! device channel for a DEV thread */
215 union /*! embedded command for a DEV thread */
216 {
217 ioc_command_t ioc; /*! IOC device generic command */
218 txt_command_t txt; /*! TXT device generic command */
219 nic_command_t nic; /*! NIC device generic command */
220 mmc_command_t mmc; /*! MMC device generic command */
221 dma_command_t dma; /*! DMA device generic command */
222 }
223 command;
215 
216 ioc_command_t ioc_cmd; /*! IOC device generic command */
217 txt_command_t txt_cmd; /*! TXT device generic command */
218 nic_command_t nic_cmd; /*! NIC device generic command */
219 mmc_command_t mmc_cmd; /*! MMC device generic command */
220 dma_command_t dma_cmd; /*! DMA device generic command */
221 
225 222 cxy_t rpc_client_cxy; /*! client cluster index (for a RPC thread) */
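Flattening the anonymous union into five named members trades a few bytes of thread-descriptor size for simpler access paths (this->ioc_cmd.error instead of this->command.ioc.error, as the dev_ioc.c hunks above show). A minimal sketch of the layout consequence, with placeholder command types standing in for the real ALMOS-MKH ones:

    // Layout consequence of replacing the command union by separate fields.
    // The command types below are placeholders, not the real ALMOS-MKH ones.
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint64_t dev_xp; uint32_t lba; uint32_t count; } ioc_command_t;
    typedef struct { uint64_t dev_xp; uint32_t size; }                dma_command_t;

    struct with_union  { union { ioc_command_t ioc; dma_command_t dma; } command; };
    struct with_fields { ioc_command_t ioc_cmd; dma_command_t dma_cmd; };

    int main( void )
    {
        // the union occupies only its largest member,
        // the flattened form occupies the sum of all members
        printf( "union  : %zu bytes\n", sizeof( struct with_union ) );
        printf( "fields : %zu bytes\n", sizeof( struct with_fields ) );
        return 0;
    }

Since a DEV thread only ever services one device type, the union was sufficient; the named fields simply make the code more readable at the cost of the summed size.
-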
trunk/kernel/libk/elf.c
r270 r279 
191 191 process->vmm.code_vpn_base = start >> CONFIG_PPM_PAGE_SHIFT;
192 192 
193 elf_dmsg("\n[INFO] %s found CODE vseg / base = %x / size = %x\n",
193 elf_dmsg("\n[INFO] %s : found CODE vseg / base = %x / size = %x\n",
194 194 __FUNCTION__ , start , mem_size );
195 195 }
… …
199 199 process->vmm.data_vpn_base = start >> CONFIG_PPM_PAGE_SHIFT;
200 200 
201 elf_dmsg("\n[INFO] %s found DATA vseg / base = %x / size = %x\n",
201 elf_dmsg("\n[INFO] %s : found DATA vseg / base = %x / size = %x\n",
202 202 __FUNCTION__, start , mem_size );
203 203 }
-
trunk/kernel/libk/remote_fifo.c
r124 r279 
49 49 //////////////////////////////////////////////
50 50 error_t remote_fifo_put_item( xptr_t fifo,
51 uint64_t * item,
51 uint64_t item,
52 52 bool_t * first )
53 53 {
… …
112 112 
113 113 // copy item to fifo
114 hal_remote_swd( XPTR( cxy , &ptr->data[ptw] ), *item );
114 hal_remote_swd( XPTR( cxy , &ptr->data[ptw] ), item );
115 hal_fence();
115 116 
116 117 
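Passing the 64-bit item by value instead of through a pointer removes one dereference on the producer side and lets callers pass literals directly. A before/after model of the signature change (fifo_put_old and fifo_put_new are illustrative stand-ins, and the single-threaded fifo_t here omits the ticket and fence machinery of the real remote FIFO):

    // Before/after model of the remote_fifo_put_item() signature change :
    // passing a 64-bit item by value instead of through a pointer.
    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    #define SLOTS 4

    typedef struct { uint64_t data[SLOTS]; unsigned wr; } fifo_t;

    static int fifo_put_old( fifo_t * f, uint64_t * item )   // old style
    {
        if( f->wr >= SLOTS ) return EAGAIN;
        f->data[f->wr++] = *item;                            // extra dereference
        return 0;
    }

    static int fifo_put_new( fifo_t * f, uint64_t item )     // new style
    {
        if( f->wr >= SLOTS ) return EAGAIN;
        f->data[f->wr++] = item;                             // item travels by value
        return 0;
    }

    int main( void )
    {
        fifo_t f = { .wr = 0 };
        uint64_t v = 0x12345678ULL;
        fifo_put_old( &f, &v );                 // caller must take an address
        fifo_put_new( &f, 0x9abcdef0ULL );      // caller can pass a literal
        printf( "%llx %llx\n", (unsigned long long)f.data[0],
                               (unsigned long long)f.data[1] );
        return 0;
    }

The added hal_fence() after the remote store matters independently of the signature: it ensures the data word is globally visible before the valid flag is raised for the consumer.
-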
trunk/kernel/libk/remote_fifo.h
r68 r279 
40 40 *
41 41 * WARNING : the number of slots is statically defined by the global
42 * configuration parameter CONFIG_REMOTE_FIFO_SLOTS for all fifos ,requiring
43 * 12 * CONFIG_REMOTE_FIFO_SLOTS bytes for each FIFO.
42 * configuration parameter CONFIG_REMOTE_FIFO_SLOTS for all fifos.
43 * Each FIFO requires 8 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.
44 44 ***********************************************************************************/
45 45 
… …
67 67 ************************************************************************************
68 68 * @ fifo : pointer to the local fifo.
69 * @ item : pointer on destination buffer for extracted item.
70 * @ size : actual number of bytes in one item.
69 * @ item : [out] pointer on buffer for extracted item.
71 70 * @ return 0 on success, EAGAIN if the buffer is empty.
72 71 ***********************************************************************************/
… …
78 77 * by an extended pointer.
79 78 * This function gets a write ticket using a remote_atomic_increment on the
80 * write slot index and waits until the slot is empty, using a descheduling
81 * policy (without blocking).
79 * write slot. Then, it waits until the slot is empty, using a descheduling
80 * policy without blocking.
82 81 ************************************************************************************
83 82 * @ fifo : extended pointer to the fifo in remote cluster.
84 * @ item : pointer on a local buffer containing the item to be stored.
83 * @ item : item to be stored.
85 84 * @ first : [out] true if first item registered in remote fifo.
86 85 * @ return 0 on success / EBUSY if a contention has been detected.
87 86 ***********************************************************************************/
88 87 error_t remote_fifo_put_item( xptr_t fifo,
89 uint64_t * item,
88 uint64_t item,
90 89 bool_t * first );
91 90 
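The corrected footprint comment, 8 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes, is consistent with one 8-byte data word plus one 4-byte state word per slot, plus two 4-byte read/write indexes for the FIFO itself. A sketch that makes the arithmetic checkable at compile time; note the struct is a plausible reconstruction matching the stated size, not the real remote_fifo_t, and the identity only holds without padding (an even slot count keeps the 64-bit array aligned):

    // Compile-time check of the stated FIFO footprint :
    // 8 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.
    // This layout is a guessed model, not the real remote_fifo_t.
    #include <stdint.h>

    #define CONFIG_REMOTE_FIFO_SLOTS 4   // even, so no alignment padding

    typedef struct fifo_model_s
    {
        uint32_t ptr;                              // read index  (4 bytes)
        uint32_t ptw;                              // write index (4 bytes)
        uint32_t valid[CONFIG_REMOTE_FIFO_SLOTS];  // 4 bytes per slot
        uint64_t data[CONFIG_REMOTE_FIFO_SLOTS];   // 8 bytes per slot
    } fifo_model_t;

    _Static_assert( sizeof( fifo_model_t ) ==
                    8 + 12 * CONFIG_REMOTE_FIFO_SLOTS,
                    "footprint formula does not match layout" );

    int main( void ) { return 0; }

With 4 slots this gives 8 + 16 + 32 = 56 bytes, matching the formula.
-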
trunk/kernel/mm/mapper.c
r265 r279 
30 30 #include <rwlock.h>
31 31 #include <printk.h>
32 #include <memcpy.h>
32 33 #include <thread.h>
33 34 #include <core.h>
-
trunk/kernel/syscalls/syscalls.h
r50 r279 
401 401 * @ return 0 if success / returns -1 if failure.
402 402 ********************************************************************************************/
403 int sys_mkdir( char
403 int sys_mkdir( char * pathname,
404 404 uint32_t mode );
405 405 
-
trunk/kernel/vfs/devfs.c
r204 r279 
79 79 error_t error;
80 80 
81 devfs_dmsg("\n[INFO] %s : enter in cluster %x\n",
82 __FUNCTION__ , local_cxy );
83 
81 84 // creates DEVFS "dev" inode in cluster IO
82 85 error = vfs_add_child_in_parent( LOCAL_CLUSTER->io_cxy,
… …
88 91 devfs_dev_inode_xp );
89 92 
90 nolock_assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
93 assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
94 
95 devfs_dmsg("\n[INFO] %s : <dev> created in cluster %x\n",
96 __FUNCTION__ , local_cxy );
91 97 
92 98 // create DEVFS "external" inode in cluster IO
… …
99 105 devfs_external_inode_xp );
100 106 
101 nolock_assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
107 assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
108 
109 devfs_dmsg("\n[INFO] %s : <external> created in cluster %x\n",
110 __FUNCTION__ , local_cxy );
102 111 }
103 112 
-
trunk/kernel/vfs/fatfs.c
r265 r279 
217 217 } // end get_name_from_long()
218 218 
219 //////////////////////////////////////////////////////////////////////////////////////////
220 // This function returns the FATFS cluster index of a page identified by its page
221 // index in the file, using the FAT mapper. It scans the FAT mapper, starting from the
222 // FATFS cluster index allocated to the first page of the file, until it reaches the
223 // searched page. The FAT mapper is automatically updated in case of miss.
224 // This function can be called by any thread running in any cluster, as it uses the
225 // RPC_FATFS_GET_CLUSTER to access the remote FAT mapper if required.
226 // We use a RPC to scan the FAT because the RPC_FIFO will avoid contention
227 // in the cluster containing the FAT mapper, and the RPC latency is not critical
228 // compared to the device access latency.
229 //////////////////////////////////////////////////////////////////////////////////////////
230 // @ ctx : pointer on local FATFS context.
231 // @ first_cluster : first cluster allocated to a file in FATFS.
232 // @ page_index : index of searched page in file (one page occupies one cluster).
233 // @ cluster_index : [out] pointer on buffer for FATFS cluster index.
234 // @ return 0 if success / return EIO if a FAT cluster miss cannot be solved.
235 //////////////////////////////////////////////////////////////////////////////////////////
236 static error_t fatfs_cluster_from_index( fatfs_ctx_t * ctx,
237 uint32_t first_cluster,
238 uint32_t page_index,
239 uint32_t * cluster_index )
240 {
241 uint32_t searched_cluster; // searched FATFS cluster index
242 error_t error;
243 
244 // get extended pointer on FAT mapper
245 xptr_t fat_mapper_xp = ctx->fat_mapper_xp;
246 
247 // get cluster cxy and local pointer on FAT mapper
248 cxy_t fat_mapper_cxy = GET_CXY( fat_mapper_xp );
249 mapper_t * fat_mapper_ptr = (mapper_t *)GET_PTR( fat_mapper_xp );
250 
251 if( fat_mapper_cxy == local_cxy ) // FAT mapper is local
252 {
253 error = fatfs_get_cluster( fat_mapper_ptr,
254 first_cluster,
255 page_index,
256 &searched_cluster );
257 }
258 else // FAT mapper is remote
259 {
260 rpc_fatfs_get_cluster_client( fat_mapper_cxy,
261 fat_mapper_ptr,
262 first_cluster,
263 page_index,
264 &searched_cluster,
265 &error );
266 }
267 
268 if( error )
269 {
270 printk("\n[ERROR] in %s : cannot access FAT\n", __FUNCTION__ );
271 return error;
272 }
273 
274 // return success
275 *cluster_index = searched_cluster;
276 return 0;
277 
278 } // end fatfs_cluster_from_index()
279 219 
220 //////////////////////////////////////////////////////////////////////////////////////////
… …
400 340 uint8_t * buffer;
401 341 
402 fatfs_dmsg("\n[INFO] %s : enter sfor fatfs_ctx = %x\n",
342 fatfs_dmsg("\n[INFO] %s : enter for fatfs_ctx = %x\n",
403 343 __FUNCTION__ , fatfs_ctx );
404 344 
… …
414 354 "cannot allocate memory for 512 bytes buffer\n" );
415 355 
356 fatfs_dmsg("\n[INFO] %s : allocated 512 bytes buffer\n", __FUNCTION__ );
357 
416 358 // load the boot record from device
417 359 // using a synchronous access to IOC device
418 360 error = dev_ioc_sync_read( buffer , 0 , 1 );
361 
362 fatfs_dmsg("\n[INFO] %s : buffer loaded\n", __FUNCTION__ );
419 363 
420 364 assert( (error == 0) , __FUNCTION__ ,
… …
441 385 uint32_t sector_size = fatfs_get_record( BPB_BYTSPERSEC , buffer , 1 );
442 386 
443 nolock_assert( (sector_size == 512) , __FUNCTION__ ,
444 "sector size must be 512 bytes\n" );
387 assert( (sector_size == 512) , __FUNCTION__ ,
388 "sector size must be 512 bytes\n" );
445 389 
446 390 // check cluster size from boot record
447 391 uint32_t nb_sectors = fatfs_get_record( BPB_SECPERCLUS , buffer , 1 );
448 392 
449 nolock_assert( (nb_sectors == 8) , __FUNCTION__ ,
450 "cluster size must be 8 sectors\n" );
393 assert( (nb_sectors == 8) , __FUNCTION__ ,
394 "cluster size must be 8 sectors\n" );
451 395 
452 396 // check number of FAT copies from boot record
453 397 uint32_t nb_fats = fatfs_get_record( BPB_NUMFATS , buffer , 1 );
454 398 
455 nolock_assert( (nb_fats == 1) , __FUNCTION__ ,
456 "number of FAT copies must be 1\n" );
399 assert( (nb_fats == 1) , __FUNCTION__ ,
400 "number of FAT copies must be 1\n" );
457 401 
458 402 // get & check number of sectors in FAT from boot record
459 403 uint32_t fat_sectors = fatfs_get_record( BPB_FAT32_FATSZ32 , buffer , 1 );
460 404 
461 nolock_assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ ,
462 "FAT not multiple of 16 sectors\n");
405 assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ ,
406 "FAT not multiple of 16 sectors\n");
463 407 
464 408 // get and check root cluster from boot record
465 409 uint32_t root_cluster = fatfs_get_record( BPB_FAT32_ROOTCLUS , buffer , 1 );
466 410 
467 nolock_assert( (root_cluster == 2) , __FUNCTION__ ,
468 "root cluster index must be 2\n");
411 assert( (root_cluster == 2) , __FUNCTION__ ,
412 "root cluster index must be 2\n");
469 413 
470 414 // get FAT lba from boot record
… …
475 419 req.ptr = buffer;
476 420 kmem_free( &req );
421 
422 fatfs_dmsg("\n[INFO] %s : boot record read & released\n",
423 __FUNCTION__ );
477 424 
478 425 // allocate a mapper for the FAT itself
… …
494 441 fatfs_ctx->last_allocated_index = 0; // TODO ???
495 442 fatfs_ctx->fat_mapper_xp = XPTR( local_cxy , fat_mapper );
443 
444 fatfs_dmsg("\n[INFO] %s : exit for fatfs_ctx = %x\n",
445 __FUNCTION__ , fatfs_ctx );
496 446 
497 447 } // end fatfs_ctx_init()
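fatfs_ctx_init() validates the FAT32 boot sector field by field before building the context, and the fields it checks sit at the standard FAT32 BPB offsets. A standalone sketch reading the same five fields from a 512-byte boot sector image; plain little-endian reads stand in for fatfs_get_record(), whose real implementation is not shown in this changeset:

    // Standalone check of the same FAT32 boot-sector fields that
    // fatfs_ctx_init() validates. Offsets are the standard FAT32 BPB ones.
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_le16( const uint8_t * b, int off )
    { return b[off] | (b[off+1] << 8); }

    static uint32_t read_le32( const uint8_t * b, int off )
    { return b[off] | (b[off+1] << 8) | (b[off+2] << 16) | ((uint32_t)b[off+3] << 24); }

    static int check_boot_record( const uint8_t boot[512] )
    {
        uint32_t sector_size  = read_le16( boot , 11 );  // BPB_BYTSPERSEC
        uint32_t nb_sectors   = boot[13];                // BPB_SECPERCLUS
        uint32_t nb_fats      = boot[16];                // BPB_NUMFATS
        uint32_t fat_sectors  = read_le32( boot , 36 );  // BPB_FAT32_FATSZ32
        uint32_t root_cluster = read_le32( boot , 44 );  // BPB_FAT32_ROOTCLUS

        if( sector_size != 512 ) return -1;   // same constraints as the kernel :
        if( nb_sectors != 8 )    return -1;   // 4 Kbytes clusters
        if( nb_fats != 1 )       return -1;   // single FAT copy
        if( fat_sectors & 0xF )  return -1;   // FAT multiple of 16 sectors
        if( root_cluster != 2 )  return -1;   // root directory in cluster 2
        return 0;
    }

    int main( void )
    {
        uint8_t boot[512] = { 0 };
        boot[11] = 0x00; boot[12] = 0x02;   // 512 bytes per sector
        boot[13] = 8;                       // 8 sectors per cluster
        boot[16] = 1;                       // one FAT copy
        boot[36] = 16;                      // FAT size : 16 sectors
        boot[44] = 2;                       // root directory cluster
        printf( "boot record %s\n",
                check_boot_record( boot ) ? "rejected" : "accepted" );
        return 0;
    }

The removed fatfs_cluster_from_index() helper did the page-index to FAT-cluster translation through the FAT mapper (locally or via RPC_FATFS_GET_CLUSTER); this changeset drops it from fatfs.c without a replacement in the shown hunks.
-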
trunk/kernel/vfs/vfs.c
r271 r279 
154 154 error_t error;
155 155 
156 vfs_dmsg("\n[INFO] %s : enter / local_cluster = %x / parent_cluster = %x\n",
157 __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
158 
156 159 // check fs type and get pointer on context
157 160 if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
… …
224 227 remote_rwlock_init( XPTR( local_cxy , &inode->data_lock ) );
225 228 remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );
229 
230 vfs_dmsg("\n[INFO] %s : exit / local_cluster = %x / parent_cluster = %x\n",
231 __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
226 232 
227 233 // return extended pointer on inode
… …
1516 1522 parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );
1517 1523 
1524 vfs_dmsg("\n[INFO] %s : enter in cluster %x / child_cxy = %x / parent_cxy = %x\n",
1525 __FUNCTION__ , local_cxy , child_cxy , parent_cxy );
1526 
1518 1527 // 1. create dentry
1519 1528 if( parent_cxy == local_cxy ) // parent cluster is the local cluster
… …
1523 1532 parent_ptr,
1524 1533 &dentry_xp );
1534 
1535 vfs_dmsg("\n[INFO] %s : dentry created in local cluster %x\n",
1536 __FUNCTION__ , local_cxy );
1525 1537 }
1526 1538 else // parent cluster is remote
… …
1532 1544 &dentry_xp,
1533 1545 &error );
1546 
1547 vfs_dmsg("\n[INFO] %s : dentry created in remote cluster %x\n",
1548 __FUNCTION__ , parent_cxy );
1534 1549 }
1535 1550 
… …
1558 1573 gid,
1559 1574 &inode_xp );
1575 
1576 vfs_dmsg("\n[INFO] %s : inode created in local cluster %x\n",
1577 __FUNCTION__ , local_cxy );
1560 1578 }
1561 1579 else // child cluster is remote
… …
1572 1590 &inode_xp,
1591 &error );
1592 
1593 vfs_dmsg("\n[INFO] %s : inode created in remote cluster %x\n",
1594 __FUNCTION__ , child_cxy );
1574 1595 }
1575 1596 
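vfs_add_child_in_parent() shows the recurring ALMOS-MKH placement idiom that the added debug messages trace: compare the target cluster identifier with local_cxy, then either call the local service directly or ship the same operation through an RPC client stub. A standalone model of that dispatch; every name below (create_dentry, create_dentry_local, create_dentry_rpc) is an illustrative stand-in, not the kernel API:

    // Standalone model of the "local call or RPC" dispatch used by
    // vfs_add_child_in_parent() : all names are illustrative.
    #include <stdio.h>

    typedef unsigned int cxy_t;

    static cxy_t local_cxy = 0;     // identifier of the local cluster

    static int create_dentry_local( const char * name )
    {
        printf( "dentry <%s> created locally\n", name );
        return 0;
    }

    // stand-in for an RPC client stub : marshal the arguments, post them
    // in the remote cluster RPC_FIFO, then wait for the answer
    static int create_dentry_rpc( cxy_t target, const char * name )
    {
        printf( "dentry <%s> created via RPC in cluster %u\n", name, target );
        return 0;
    }

    static int create_dentry( cxy_t parent_cxy, const char * name )
    {
        if( parent_cxy == local_cxy )            // parent cluster is local
            return create_dentry_local( name );
        else                                     // parent cluster is remote
            return create_dentry_rpc( parent_cxy, name );
    }

    int main( void )
    {
        create_dentry( 0, "dev" );       // local path
        create_dentry( 3, "external" );  // remote path
        return 0;
    }

The same two-way split appears twice in the function, once for the dentry (placed in the parent cluster) and once for the inode (placed in the child cluster), which is why the changeset adds a debug message on each of the four branches.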