Ticket #21: ipi.diff
File ipi.diff, 18.3 KB (added 15 years ago)
mutek/include/mutek/scheduler.h
 CONTAINER_TYPE(sched_queue, DLIST, struct sched_context_s
 {
   CONTAINER_ENTRY_TYPE(DLIST) list_entry;
-  sched_queue_root_t *root;        //< keep track of associated scheduler queue
+  struct scheduler_s *scheduler;   //< keep track of associated scheduler queue
   struct context_s context;        //< execution context
 
-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  void *cpu_cls;                   //< used as cpu identifier for IPIs
-#endif
-
   void *private;
 
 #ifdef CONFIG_MUTEK_SCHEDULER_MIGRATION_AFFINITY
mutek/scheduler.c
   return next;
 }
 
+#if defined(CONFIG_HEXO_IPI)
+#define CONTAINER_LOCK_idle_cpu_queue HEXO_SPIN
+CONTAINER_TYPE(idle_cpu_queue, CLIST, struct ipi_endpoint_s, idle_cpu_queue_list_entry);
+CONTAINER_FUNC(idle_cpu_queue, CLIST, static inline, idle_cpu_queue, list_entry);
+#endif
+
+struct scheduler_s
+{
+  sched_queue_root_t root;
+#if defined(CONFIG_HEXO_IPI)
+# if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  idle_cpu_queue_root_t idle_cpu;
+# elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+  struct ipi_endpoint_s *ipi_endpoint;
+# endif
+#endif
+};
+
 /************************************************************************/
 
 #if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
 
 /* scheduler root */
-static sched_queue_root_t CPU_NAME_DECL(sched_root);
+static struct scheduler_s CPU_NAME_DECL(scheduler);
 
-# if defined(CONFIG_HEXO_IPI)
-/* sleeping cpu list */
-
-CONTAINER_TYPE(sched_cls_queue, CLIST, struct sched_cls_item_s
-{
-  CONTAINER_ENTRY_TYPE(CLIST) list_entry;
-}, list_entry);
-
-CONTAINER_FUNC(sched_cls_queue, CLIST, static inline, sched_cls_queue, list_entry);
-
-static sched_cls_queue_root_t cls_queue;
-
-static CPU_LOCAL struct sched_cls_item_s sched_cls_item;
-
-# define GET_CLS_FROM_ITEM(item) ((void*)((uintptr_t)(item) - (uintptr_t)&sched_cls_item))
-# endif /* IPI */
-
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return &CPU_NAME_DECL(sched_root);
+  return &CPU_NAME_DECL(scheduler);
 }
 
-static inline
-void __sched_context_push(struct sched_context_s *sched_ctx)
-{
-  sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-  struct sched_cls_item_s *idle = sched_cls_queue_pop(&cls_queue);
-  if ( idle ) {
-    ipi_post(GET_CLS_FROM_ITEM(idle));
-    sched_cls_queue_pushback(&cls_queue, idle);
-  }
-#endif /* IPI */
-}
-
 /************************************************************************/
 
 #elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
 
 /* scheduler root */
-static CPU_LOCAL sched_queue_root_t sched_root;
+static CPU_LOCAL struct scheduler_s scheduler;
 
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return CPU_LOCAL_ADDR(sched_root);
+  return CPU_LOCAL_ADDR(scheduler);
 }
 
+#endif
+
 static inline
 void __sched_context_push(struct sched_context_s *sched_ctx)
 {
-  sched_queue_pushback(sched_ctx->root, sched_ctx);
+  struct scheduler_s *sched = sched_ctx->scheduler;
+  sched_queue_pushback(&sched->root, sched_ctx);
 #if defined(CONFIG_HEXO_IPI)
-  ipi_post(sched_ctx->cpu_cls);
+# if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  struct ipi_endpoint_s *idle = idle_cpu_queue_pop(&sched->idle_cpu);
+  if ( idle )
+    ipi_post(idle);
+  /* Push it back, we may have a race otherwise */
+  idle_cpu_queue_pushback(&sched->idle_cpu, idle);
+# elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+  ipi_post(sched->ipi_endpoint);
+# endif
 #endif /* IPI */
 }
 
-#endif
-
 /************************************************************************/
 
 /* idle context runtime */
 static CONTEXT_ENTRY(sched_context_idle)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
 
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
-
   /* release lock acquired in previous sched_context_switch() call */
   sched_unlock();
   cpu_interrupt_disable();
…
     /* CPU sleep waiting for interrupts */
     cpu_interrupt_wait();
 #elif defined(CONFIG_HEXO_IPI)
-    if (CPU_LOCAL_GET(ipi_icu_dev))
+    if (ipi_endpoint_isvalid(CPU_LOCAL_ADDR(ipi_endpoint))) {
+# if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+      idle_cpu_queue_push(&sched->idle_cpu, CPU_LOCAL_ADDR(ipi_endpoint));
       cpu_interrupt_wait();
+      idle_cpu_queue_remove(&sched->idle_cpu, CPU_LOCAL_ADDR(ipi_endpoint));
+# endif
+    }
 #endif
 
     /* Let enough time for pending interrupts to execute and assume
…
        reloading after interrupts execution. */
     cpu_interrupt_process();
 
-    sched_queue_wrlock(root);
+    sched_queue_wrlock(&sched->root);
 
-    if ((next = __sched_candidate_noidle(root)) != NULL)
+    if ((next = __sched_candidate_noidle(&sched->root)) != NULL)
       {
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-        sched_cls_queue_remove(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
         context_switch_to(&next->context);
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-        sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
         // printk("(c%i idle)", cpu_id());
       }
 
-    sched_queue_unlock(root);
+    sched_queue_unlock(&sched->root);
   }
 }
 
…
    with interrupts disabled */
 void sched_context_switch(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
 
-  if ((next = __sched_candidate_noidle(root)))
+  if ((next = __sched_candidate_noidle(&sched->root)))
     {
       /* push context back in running queue */
-      sched_queue_nolock_pushback(root, CONTEXT_LOCAL_GET(sched_cur));
+      sched_queue_nolock_pushback(&sched->root, CONTEXT_LOCAL_GET(sched_cur));
       context_switch_to(&next->context);
     }
 
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Must be called with interrupts disabled and sched locked */
…
   struct sched_context_s *next;
 
   /* get next running context */
-  next = __sched_candidate(__sched_root());
+  next = __sched_candidate(&__scheduler_get()->root);
   context_jump_to(&next->context);
 }
 
…
 {
   assert(!cpu_is_interruptible());
 
-  sched_queue_wrlock(__sched_root());
+  sched_queue_wrlock(&__scheduler_get()->root);
 }
 
 void sched_unlock(void)
 {
   assert(!cpu_is_interruptible());
 
-  sched_queue_unlock(__sched_root());
+  sched_queue_unlock(&__scheduler_get()->root);
 }
 
 void sched_context_init(struct sched_context_s *sched_ctx)
…
                     sched_cur, sched_ctx);
 
   sched_ctx->private = NULL;
-  sched_ctx->root = __sched_root();
+  sched_ctx->scheduler = __scheduler_get();
 
-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = (void*)CPU_GET_CLS();
-#endif
-
 #ifdef CONFIG_MUTEK_SCHEDULER_CANDIDATE_FCN
   sched_ctx->is_candidate = NULL;
 #endif
…
 void sched_wait_callback(sched_queue_root_t *queue,
                          void (*callback)(void *ctx), void *ctx)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
…
   callback(ctx);
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* push current context in the 'queue', unlock it and switch to next
…
    interrupts disabled */
 void sched_wait_unlock(sched_queue_root_t *queue)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
…
   sched_queue_unlock(queue);
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Switch to next context available in the 'root' queue, do not put
…
    disabled */
 void sched_context_stop(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Same as sched_context_stop but unlock given spinlock before switching */
 void sched_context_stop_unlock(lock_t *lock)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
   /* get next running context */
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
   lock_release(lock);
-  next = __sched_candidate(root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Must be called with interrupts disabled and queue locked */
…
   return sched_ctx;
 }
 
+static void __sched_init(struct scheduler_s *sched)
+{
+  sched_queue_init(&sched->root);
+#if defined(CONFIG_HEXO_IPI)
+# if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  idle_cpu_queue_init(&sched->idle_cpu);
+# elif defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+  sched->ipi_endpoint = NULL;
+# endif
+#endif
+}
+
 void sched_global_init(void)
 {
 #if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
-# if defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_init(&cls_queue);
-# endif
-  sched_queue_init(__sched_root());
+  __sched_init(__scheduler_get());
 #endif
 }
 
…
   assert(err == 0);
 
 #if defined (CONFIG_MUTEK_SCHEDULER_STATIC)
-  sched_queue_init(__sched_root());
+  __sched_init(__scheduler_get());
 #endif
 }
 
…
 void sched_affinity_add(struct sched_context_s *sched_ctx, cpu_id_t cpu)
 {
   void *cls = CPU_GET_CLS_ID(cpu);
-#if defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = cls;
-#endif
-  sched_ctx->root = CPU_LOCAL_CLS_ADDR(cls, sched_root);
+  sched_ctx->scheduler = CPU_LOCAL_CLS_ADDR(cls, scheduler);
 }
 
 void sched_affinity_remove(struct sched_context_s *sched_ctx, cpu_id_t cpu)
mutek/fdt_consumer.c
       priv->state = IN_CPUS;
 #if defined(CONFIG_HEXO_IPI)
       if ( priv->ipi_dev && priv->ipi_dev->drv ) {
-        dprintk("Preparing ipi dev\n");
-        void *foo = dev_icu_setupipi(priv->ipi_dev, priv->ipi_no);
-        dprintk(" CPU %d using %p:%d as ipi device, cls=%p, priv=%p\n",
-                priv->cpuid, priv->ipi_dev, priv->ipi_no,
-                cpu_local_storage[priv->cpuid], foo);
-        ipi_hook_cpu(cpu_local_storage[priv->cpuid], priv->ipi_dev, foo);
+        ipi_hook_endpoint(
+          CPU_LOCAL_CLS_ADDR(cpu_local_storage[priv->cpuid], ipi_endpoint),
+          priv->ipi_dev,
+          priv->ipi_no);
       } else {
         dprintk(" No IPI dev for CPU %d\n", priv->cpuid);
       }
hexo/ipi.c
 #include <device/device.h>
 #include <hexo/ipi.h>
 
-static CPU_LOCAL ipi_queue_root_t ipi_fifo = CONTAINER_ROOT_INITIALIZER(ipi_queue, DLIST);
-CPU_LOCAL struct device_s *ipi_icu_dev = 0;
-CPU_LOCAL void *ipi_cpu_id;
+CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
 
-error_t ipi_post(void *cpu_cls)
+CPU_LOCAL struct ipi_endpoint_s ipi_endpoint = {};
+
+error_t ipi_post(struct ipi_endpoint_s *endpoint)
 {
-  struct device_s *icu = *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
+  struct device_s *icu = endpoint->icu_dev;
 
-  if (!icu)
-    return -EOPNOTSUPP;
+  if (!icu)
+    return -EOPNOTSUPP;
 
-  return dev_icu_sendipi(icu, *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id));
+  return dev_icu_sendipi(icu, endpoint->priv);
 }
 
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq)
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq)
 {
-  if (ipi_queue_pushback(CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_fifo), rq))
-    return ipi_post(cpu_cls);
+  if (ipi_queue_pushback(&endpoint->ipi_fifo, rq))
+    return ipi_post(endpoint);
 
-  return -ENOMEM;
+  return -ENOMEM;
 }
 
 void ipi_process_rq()
 {
-  struct ipi_request_s *rq;
+  struct ipi_request_s *rq;
+  ipi_queue_root_t *fifo = &(CPU_LOCAL_ADDR(ipi_endpoint)->ipi_fifo);
 
-  while ((rq = ipi_queue_pop(CPU_LOCAL_ADDR(ipi_fifo))))
-    rq->func(rq->private);
+  while ((rq = ipi_queue_pop(fifo)))
+    rq->func(rq->private);
 }
 
-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata)
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no)
 {
-  struct device_s **icu = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
-  void **priv = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id);
+  void *foo = dev_icu_setupipi(ipi_dev, ipi_no);
+  endpoint->icu_dev = ipi_dev;
+  endpoint->priv = foo;
 
-  *icu = ipi_icudev;
-  *priv = privdata;
+  return 0;
 }
hexo/include/hexo/ipi.h
 
 typedef IPI_MSG_FUNC(ipi_msg_func_t);
 
-extern CPU_LOCAL struct device_s *ipi_icu_dev;
-extern CPU_LOCAL void *ipi_cpu_id;
-
 #define CONTAINER_LOCK_ipi_queue HEXO_SPIN
 
 CONTAINER_TYPE(ipi_queue, DLIST,
…
   ipi_queue_entry_t queue_entry;
 }, queue_entry);
 
-CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
+struct ipi_endpoint_s
+{
+  struct device_s *icu_dev;
+  void *priv;
+  ipi_queue_root_t ipi_fifo;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  CONTAINER_ENTRY_TYPE(CLIST) idle_cpu_queue_list_entry;
+#endif
+};
 
+extern CPU_LOCAL struct ipi_endpoint_s ipi_endpoint;
+
 /**
-   Send an ipi to given processor. Processor is identified using its
-   cpu local storage pointer.
+   Send an ipi to given endpoint.
+
+   @param endpoint Pointer to ipi endpoint
    @return zero if ipi was sent
-   @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post(void *cpu_cls);
+error_t ipi_post(struct ipi_endpoint_s *endpoint);
 
 /**
    Attach the given callback for execution on target processor and
-   send an ipi to given processor on success Processor is identified using its
-   cpu local storage pointer.
+   send an ipi to given endpoint.
 
+   @param endpoint Pointer to ipi endpoint
+   @param rq Request buffer
    @return zero if message was attached and ipi sent
    @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq);
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq);
 
 /**
-   Request processing of pending messages on current processor. Called from icu driver
+   Request processing of pending messages on current processor. Must
+   be called from icu driver
  */
 void ipi_process_rq();
 
 /**
-   Setup a IPI device for a given CPU.
+   Setup a IPI device for a given endpoint.
 
-   @param cpu_cls CPU's cls to hook up in
-   @param ipi_icudev Icudev handling the IPIs
-   @param privdata Icudev private data returned by @ref dev_icu_setupipi
+   @param endpoint IPI endpoint to set up
+   @param ipi_dev ICU device handling the IPI
+   @param ipi_no IPI number in ICU device @tt ipi_dev
  */
-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata);
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no);
 
+/**
+   Checks whether a given endpoint may receive IPIs.
+
+   @param endpoint IPI endpoint to check
+   @return whether endpoint may receive IPIs
+ */
+static inline
+bool_t ipi_endpoint_isvalid(struct ipi_endpoint_s *endpoint)
+{
+  return endpoint->icu_dev != NULL;
+}
+
 #endif
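For context, a minimal usage sketch of the reworked endpoint API (not part of the patch): the handler, request and helper names below are made up for illustration, and how the caller obtains the remote endpoint pointer is left open.

/* hypothetical example built against the patched <hexo/ipi.h> */
#include <hexo/ipi.h>
#include <mutek/printk.h>

/* callback run on the target processor from ipi_process_rq() */
static IPI_MSG_FUNC(hello_ipi)
{
  printk("ipi received\n");
}

/* request attached to the IPI; .func/.private are the fields
   dereferenced by ipi_process_rq() in hexo/ipi.c */
static struct ipi_request_s hello_rq =
{
  .func = hello_ipi,
  .private = NULL,
};

static void notify_remote(struct ipi_endpoint_s *remote)
{
  if (!ipi_endpoint_isvalid(remote))
    return;                 /* no ICU device hooked on this endpoint yet */

  /* queue the request on the remote endpoint and raise the IPI */
  if (ipi_post_rq(remote, &hello_rq) != 0)
    printk("ipi_post_rq failed\n");
}

The endpoint itself is hooked once at CPU enumeration time, as done above in mutek/fdt_consumer.c with ipi_hook_endpoint().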
examples/common/build_options.conf
 
 %section thumb
 CONFIG_CPU_ARM_THUMB
+
+%section ipi
+CONFIG_HEXO_IPI
examples/hello/hello.c
 #include <pthread.h>
 #include <mutek/printk.h>
 
+#define THREADS 4
+
 pthread_mutex_t m;
-pthread_t a, b;
+pthread_t thread[THREADS];
 
 void *f(void *param)
 {
…
 void app_start()
 {
   pthread_mutex_init(&m, NULL);
-  pthread_create(&a, NULL, f, "Hello world\n");
-  pthread_create(&b, NULL, f, "Hello world\n");
+  size_t i;
+  for (i = 0; i < THREADS; ++i) {
+    pthread_create(&thread[i], NULL, f, "Hello world\n");
+  }
 }
 
examples/hello/config
 # Application license
 CONFIG_LICENSE_APP_LGPL
 
-# Mutek features
-CONFIG_HEXO_IPI undefined
-
 # Libs
 CONFIG_PTHREAD
 