Ticket #21: ipi.2.diff
File ipi.2.diff, 20.8 KB (added 15 years ago)
mutek/include/mutek/scheduler.h
 CONTAINER_TYPE(sched_queue, DLIST, struct sched_context_s
 {
   CONTAINER_ENTRY_TYPE(DLIST) list_entry;
-  sched_queue_root_t *root;          //< keep track of associated scheduler queue
+  struct scheduler_s *scheduler;     //< keep track of associated scheduler queue
   struct context_s context;          //< execution context

-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  void *cpu_cls;                     //< used as cpu identifier for IPIs
-#endif
-
   void *private;

 #ifdef CONFIG_MUTEK_SCHEDULER_MIGRATION_AFFINITY
mutek/scheduler.c
   return next;
 }

-/************************************************************************/
+#if defined(CONFIG_HEXO_IPI)
+#define CONTAINER_LOCK_idle_cpu_queue HEXO_SPIN
+CONTAINER_TYPE(idle_cpu_queue, CLIST, struct ipi_endpoint_s, idle_cpu_queue_list_entry);
+CONTAINER_FUNC(idle_cpu_queue, CLIST, static inline, idle_cpu_queue, list_entry);
+#endif

-#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+struct scheduler_s
+{
+  sched_queue_root_t root;
+#if defined(CONFIG_HEXO_IPI) && defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  idle_cpu_queue_root_t idle_cpu;
+#elif defined(CONFIG_HEXO_IPI) && defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+  struct ipi_endpoint_s *ipi_endpoint;
+#endif
+};

-/* scheduler root */
-static sched_queue_root_t CPU_NAME_DECL(sched_root);
-
-# if defined(CONFIG_HEXO_IPI)
-/* sleeping cpu list */
-
-CONTAINER_TYPE(sched_cls_queue, CLIST, struct sched_cls_item_s
+static inline struct ipi_endpoint_s *__sched_pop_ipi_endpoint(struct scheduler_s *sched)
 {
-  CONTAINER_ENTRY_TYPE(CLIST) list_entry;
-}, list_entry);
+#if defined(CONFIG_HEXO_IPI) && defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  return idle_cpu_queue_pop(&sched->idle_cpu);
+#elif defined(CONFIG_HEXO_IPI) && defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+  return sched->ipi_endpoint;
+#endif
+  return NULL;
+}

-CONTAINER_FUNC(sched_cls_queue, CLIST, static inline, sched_cls_queue, list_entry);
+/************************************************************************/

-static sched_cls_queue_root_t cls_queue;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)

-static CPU_LOCAL struct sched_cls_item_s sched_cls_item;
+/* scheduler root */
+static struct scheduler_s CPU_NAME_DECL(scheduler);

-# define GET_CLS_FROM_ITEM(item) ((void*)((uintptr_t)(item) - (uintptr_t)&sched_cls_item))
-# endif /* IPI */
-
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return & CPU_NAME_DECL(sched_root);
+  return & CPU_NAME_DECL(scheduler);
 }

-static inline
-void __sched_context_push(struct sched_context_s *sched_ctx)
-{
-  sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-  struct sched_cls_item_s *idle = sched_cls_queue_pop(&cls_queue);
-  if ( idle ) {
-    ipi_post(GET_CLS_FROM_ITEM(idle));
-    sched_cls_queue_pushback(&cls_queue, idle);
-  }
-#endif /* IPI */
-}
-
-/************************************************************************/
-
 #elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)

 /* scheduler root */
-static CPU_LOCAL sched_queue_root_t sched_root;
+static CPU_LOCAL struct scheduler_s scheduler;

 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return CPU_LOCAL_ADDR(sched_root);
+  return CPU_LOCAL_ADDR(scheduler);
 }

+#endif
+
+/************************************************************************/
+
 static inline
 void __sched_context_push(struct sched_context_s *sched_ctx)
 {
-  sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-  ipi_post(sched_ctx->cpu_cls);
-#endif /* IPI */
+  struct scheduler_s *sched = sched_ctx->scheduler;
+  sched_queue_pushback(&sched->root, sched_ctx);
+
+  struct ipi_endpoint_s *idle = __sched_pop_ipi_endpoint(sched);
+  if ( idle )
+    ipi_post(idle);
 }

+static inline void __sched_yield_cpu(struct scheduler_s *sched)
+{
+  ensure( !cpu_is_interruptible() );
+
+#if !defined(CONFIG_ARCH_SMP)
+  /* CPU sleep waiting for interrupts */
+  cpu_interrupt_wait();
+#else /* We are SMP */
+  /* do not always make CPU sleep if SMP because context may be put
+     in running queue by an other cpu with no signalling. IPI is the
+     only way to solve the "problem". */
+# if defined(CONFIG_HEXO_IPI)
+#  if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  struct ipi_endpoint_s *ipi_e = CPU_LOCAL_ADDR(ipi_endpoint);
+
+  if ( ipi_endpoint_isvalid(ipi_e) ) {
+    idle_cpu_queue_pushback(&sched->idle_cpu, ipi_e);
+    cpu_interrupt_wait();
+    /* We may receive an IPI, but device IRQs are also possible,
+     * so remove us preventively */
+    idle_cpu_queue_remove(&sched->idle_cpu, ipi_e);
+  }
+#  else
+  cpu_interrupt_wait();
+#  endif
+# endif
 #endif
+}

-/************************************************************************/
-
 /* idle context runtime */
 static CONTEXT_ENTRY(sched_context_idle)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();

-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
-
   /* release lock acquired in previous sched_context_switch() call */
   sched_unlock();
   cpu_interrupt_disable();
…
     {
       struct sched_context_s *next;

-      /* do not wait if several cpus are running because context may
-         be put in running queue by an other cpu with no interrupt */
-#if !defined(CONFIG_ARCH_SMP)
-      /* CPU sleep waiting for interrupts */
-      cpu_interrupt_wait();
-#elif defined(CONFIG_HEXO_IPI)
-      if (CPU_LOCAL_GET(ipi_icu_dev))
-        cpu_interrupt_wait();
-#endif
+      __sched_yield_cpu(sched);

       /* Let enough time for pending interrupts to execute and assume
          memory is clobbered to force scheduler root queue
          reloading after interrupts execution. */
       cpu_interrupt_process();

-      sched_queue_wrlock(root);
+      sched_queue_wrlock(&sched->root);

-      if ((next = __sched_candidate_noidle(root)) != NULL)
+      if ((next = __sched_candidate_noidle(&sched->root)) != NULL)
         {
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_remove(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           context_switch_to(&next->context);
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           // printk("(c%i idle)", cpu_id());
         }

-      sched_queue_unlock(root);
+      sched_queue_unlock(&sched->root);
     }
 }

…
    with interrupts disabled */
 void sched_context_switch(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);

-  if ((next = __sched_candidate_noidle(root)))
+  if ((next = __sched_candidate_noidle(&sched->root)))
     {
       /* push context back in running queue */
-      sched_queue_nolock_pushback(root, CONTEXT_LOCAL_GET(sched_cur));
+      sched_queue_nolock_pushback(&sched->root, CONTEXT_LOCAL_GET(sched_cur));
       context_switch_to(&next->context);
     }

-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Must be called with interrupts disabled and sched locked */
…
   struct sched_context_s *next;

   /* get next running context */
-  next = __sched_candidate(__sched_root());
+  next = __sched_candidate(&__scheduler_get()->root);
   context_jump_to(&next->context);
 }

…
 {
   assert(!cpu_is_interruptible());

-  sched_queue_wrlock(__sched_root());
+  sched_queue_wrlock(&__scheduler_get()->root);
 }

 void sched_unlock(void)
 {
   assert(!cpu_is_interruptible());

-  sched_queue_unlock(__sched_root());
+  sched_queue_unlock(&__scheduler_get()->root);
 }

 void sched_context_init(struct sched_context_s *sched_ctx)
…
                     sched_cur, sched_ctx);

   sched_ctx->private = NULL;
-  sched_ctx->root = __sched_root();
+  sched_ctx->scheduler = __scheduler_get();

-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = (void*)CPU_GET_CLS();
-#endif
-
 #ifdef CONFIG_MUTEK_SCHEDULER_CANDIDATE_FCN
   sched_ctx->is_candidate = NULL;
 #endif
…
 void sched_wait_callback(sched_queue_root_t *queue,
                          void (*callback)(void *ctx), void *ctx)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());
…
   callback(ctx);

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* push current context in the 'queue', unlock it and switch to next
…
    interrupts disabled */
 void sched_wait_unlock(sched_queue_root_t *queue)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());
…
   sched_queue_unlock(queue);

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Switch to next context available in the 'root' queue, do not put
…
    disabled */
 void sched_context_stop(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Same as sched_context_stop but unlock given spinlock before switching */
 void sched_context_stop_unlock(lock_t *lock)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

   /* get next running context */
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
   lock_release(lock);
-  next = __sched_candidate(root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Must be called with interrupts disabled and queue locked */
…

 void sched_global_init(void)
 {
-#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  struct scheduler_s *sched = __scheduler_get();
+
+  sched_queue_init(&sched->root);
 # if defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_init(&cls_queue);
+  idle_cpu_queue_init(&sched->idle_cpu);
 # endif
-  sched_queue_init(__sched_root());
 #endif
 }

…

   assert(err == 0);

-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC)
-  sched_queue_init(__sched_root());
+#if defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+  struct scheduler_s *sched = __scheduler_get();
+
+  sched_queue_init(&sched->root);
+# if defined(CONFIG_HEXO_IPI)
+  sched->ipi_endpoint = NULL;
+# endif
 #endif
 }

…
 void sched_affinity_add(struct sched_context_s *sched_ctx, cpu_id_t cpu)
 {
   void *cls = CPU_GET_CLS_ID(cpu);
-#if defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = cls;
-#endif
-  sched_ctx->root = CPU_LOCAL_CLS_ADDR(cls, sched_root);
+  sched_ctx->scheduler = CPU_LOCAL_CLS_ADDR(cls, scheduler);
 }

 void sched_affinity_remove(struct sched_context_s *sched_ctx, cpu_id_t cpu)
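The interaction between __sched_yield_cpu() and __sched_context_push() introduced above is easiest to read as a timeline. The sketch below is illustrative only (it is not part of the patch) and assumes an SMP build with CONFIG_HEXO_IPI and CONFIG_MUTEK_SCHEDULER_MIGRATION enabled:

    /* CPU B, idle loop (__sched_yield_cpu)       CPU A (__sched_context_push)
     * -------------------------------------      ------------------------------------------
     * idle_cpu_queue_pushback(&sched->idle_cpu, ep_B)
     * cpu_interrupt_wait()        <-- sleeps
     *                                             sched_queue_pushback(&sched->root, ctx)
     *                                             idle = __sched_pop_ipi_endpoint(sched)
     *                                             ipi_post(idle)        --> IPI wakes CPU B
     * idle_cpu_queue_remove(&sched->idle_cpu, ep_B)
     * cpu_interrupt_process(); lock &sched->root and pick up ctx
     */

Without the IPI, CPU B could stay in cpu_interrupt_wait() indefinitely even though a runnable context sits in the shared run queue.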
mutek/fdt_consumer.c
 	priv->state = IN_CPUS;
 #if defined(CONFIG_HEXO_IPI)
 	if ( priv->ipi_dev && priv->ipi_dev->drv ) {
-		dprintk("Preparing ipi dev\n");
-		void *foo = dev_icu_setupipi(priv->ipi_dev, priv->ipi_no);
-		dprintk(" CPU %d using %p:%d as ipi device, cls=%p, priv=%p\n",
-			priv->cpuid, priv->ipi_dev, priv->ipi_no,
-			cpu_local_storage[priv->cpuid], foo);
-		ipi_hook_cpu(cpu_local_storage[priv->cpuid], priv->ipi_dev, foo);
+		ipi_hook_endpoint(
+			CPU_LOCAL_CLS_ADDR(cpu_local_storage[priv->cpuid], ipi_endpoint),
+			priv->ipi_dev,
+			priv->ipi_no);
 	} else {
 		dprintk(" No IPI dev for CPU %d\n", priv->cpuid);
 	}
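With the device-tree path above, each CPU's endpoint is hooked automatically while the cpus node is parsed. On a platform without an FDT the same call can be made directly at CPU init time; the sketch below is hypothetical (the ICU device pointer and the IPI line number 0 are placeholders, not part of this patch):

    #include <device/device.h>
    #include <hexo/ipi.h>
    #include <hexo/local.h>

    /* Hook the calling CPU's per-CPU endpoint (declared in hexo/ipi.c)
       to the ICU device that delivers its IPIs, using IPI line 0. */
    static void cpu_ipi_init(struct device_s *my_ipi_icu)
    {
        ipi_hook_endpoint(CPU_LOCAL_ADDR(ipi_endpoint), my_ipi_icu, 0);
    }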
hexo/ipi.c
 #include <device/device.h>
 #include <hexo/ipi.h>

-static CPU_LOCAL ipi_queue_root_t ipi_fifo = CONTAINER_ROOT_INITIALIZER(ipi_queue, DLIST);
-CPU_LOCAL struct device_s *ipi_icu_dev = 0;
-CPU_LOCAL void *ipi_cpu_id;
+CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);

-error_t ipi_post(void *cpu_cls)
+CPU_LOCAL struct ipi_endpoint_s ipi_endpoint = {};
+
+error_t ipi_post(struct ipi_endpoint_s *endpoint)
 {
-  struct device_s *icu = *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
+    struct device_s *icu = endpoint->icu_dev;

-  if (!icu)
-    return -EOPNOTSUPP;
+    if (!icu)
+        return -EOPNOTSUPP;

-  return dev_icu_sendipi(icu, *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id));
+    return dev_icu_sendipi(icu, endpoint->priv);
 }

-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq)
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq)
 {
-  if (ipi_queue_pushback(CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_fifo), rq))
-    return ipi_post(cpu_cls);
+    if (ipi_queue_pushback(&endpoint->ipi_fifo, rq))
+        return ipi_post(endpoint);

-  return -ENOMEM;
+    return -ENOMEM;
 }

 void ipi_process_rq()
 {
-  struct ipi_request_s *rq;
+    struct ipi_request_s *rq;
+    ipi_queue_root_t *fifo = &(CPU_LOCAL_ADDR(ipi_endpoint)->ipi_fifo);

-  while ((rq = ipi_queue_pop(CPU_LOCAL_ADDR(ipi_fifo))))
-    rq->func(rq->private);
+    while ((rq = ipi_queue_pop(fifo)))
+        rq->func(rq->private);
 }

-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata)
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no)
 {
-  struct device_s **icu = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
-  void **priv = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id);
+    void *foo = dev_icu_setupipi(ipi_dev, ipi_no);
+    endpoint->icu_dev = ipi_dev;
+    endpoint->priv = foo;

-  *icu = ipi_icudev;
-  *priv = privdata;
+    return 0;
 }
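ipi_process_rq() drains the per-CPU request fifo that ipi_post_rq() fills; as the header below states, it is meant to be called from the ICU driver. A minimal hypothetical handler (the function name and how it is registered with the driver are placeholders, not part of this patch) would look like:

    #include <hexo/ipi.h>

    /* invoked by the ICU driver when the IPI interrupt fires on this CPU */
    static void my_icu_ipi_irq(void)
    {
        ipi_process_rq();   /* run every queued callback with its private data */
    }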
hexo/include/hexo/ipi.h

 typedef IPI_MSG_FUNC(ipi_msg_func_t);

-extern CPU_LOCAL struct device_s *ipi_icu_dev;
-extern CPU_LOCAL void *ipi_cpu_id;
-
 #define CONTAINER_LOCK_ipi_queue HEXO_SPIN

 CONTAINER_TYPE(ipi_queue, DLIST,
…
   ipi_queue_entry_t queue_entry;
 }, queue_entry);

-CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
+struct ipi_endpoint_s
+{
+    struct device_s *icu_dev;
+    void *priv;
+    ipi_queue_root_t ipi_fifo;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    CONTAINER_ENTRY_TYPE(CLIST) idle_cpu_queue_list_entry;
+#endif
+};

+extern CPU_LOCAL struct ipi_endpoint_s ipi_endpoint;
+
+
 /**
-   Send an ipi to given processor. Processor is identified using its
-   cpu local storage pointer.
+   Send an ipi to given endpoint.
+
+   @param endpoint Pointer to ipi endpoint
    @return zero if ipi was sent
-   @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post(void *cpu_cls);
+error_t ipi_post(struct ipi_endpoint_s *endpoint);

 /**
    Attach the given callback for execution on target processor and
-   send an ipi to given processor on success. Processor is identified using its
-   cpu local storage pointer.
+   send an ipi to given endpoint.

+   @param endpoint Pointer to ipi endpoint
+   @param rq Request buffer
    @return zero if message was attached and ipi sent
    @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq);
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq);

 /**
-   Request processing of pending messages on current processor. Called from icu driver
+   Request processing of pending messages on current processor. Must
+   be called from icu driver
  */
 void ipi_process_rq();

 /**
-   Setup a IPI device for a given CPU.
+   Setup a IPI device for a given endpoint.

-   @param cpu_cls CPU's cls to hook up in
-   @param ipi_icudev Icudev handling the IPIs
-   @param privdata Icudev private data returned by @ref dev_icu_setupipi
+   @param endpoint IPI endpoint to set up
+   @param ipi_dev ICU device handling the IPI
+   @param ipi_no IPI number in ICU device @tt ipi_dev
  */
-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata);
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no);

+/**
+   Checks whether a given endpoint may receive IPIs.
+
+   @param endpoint IPI endpoint to check
+   @return whether endpoint may receive IPIs
+ */
+static inline
+bool_t ipi_endpoint_isvalid(struct ipi_endpoint_s *endpoint)
+{
+    return endpoint != NULL && endpoint->icu_dev != NULL;
+}
+
 #endif
-
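A hypothetical use of the reworked API (not part of the patch): queue a callback for another CPU and kick it with an IPI. Here remote_cls stands for the target CPU's cluster-local storage pointer obtained elsewhere, and the IPI_MSG_FUNC prototype is assumed to receive the request's private pointer, matching the rq->func(rq->private) call in ipi_process_rq():

    #include <hexo/ipi.h>
    #include <hexo/cpu.h>
    #include <mutek/printk.h>

    static IPI_MSG_FUNC(remote_hello)
    {
        printk("hello from cpu %i\n", cpu_id());
    }

    static struct ipi_request_s hello_rq = { .func = remote_hello, .private = NULL };

    static void poke_remote_cpu(void *remote_cls)
    {
        struct ipi_endpoint_s *ep = CPU_LOCAL_CLS_ADDR(remote_cls, ipi_endpoint);

        if (ipi_endpoint_isvalid(ep))
            ipi_post_rq(ep, &hello_rq);   /* 0 on success, -ENOMEM / -EOPNOTSUPP otherwise */
    }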
hexo/include/hexo/cpu.h

 cpu_cycle_t cpu_cycle_count(void);

+static inline
+void cpu_cycle_wait(cpu_cycle_t delta)
+{
+    delta += cpu_cycle_count();
+    while ( cpu_cycle_count() < delta )
+        ;
+}
+
 /** cpu trap instruction */
 void cpu_trap();

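cpu_cycle_wait() above simply spins until cpu_cycle_count() has advanced by delta, so it busy-waits and assumes the cycle counter does not wrap during the wait; the soclib spinlock change further down uses it to back off between lock attempts. A small hypothetical use (the register pointer is a placeholder, not part of this patch):

    #include <hexo/cpu.h>
    #include <hexo/types.h>

    /* poll a memory-mapped ready flag, backing off ~1000 cycles between reads */
    static void wait_ready(volatile uint32_t *ready_reg)
    {
        while (!*ready_reg)
            cpu_cycle_wait(1000);
    }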
arch/soclib/include/arch/hexo/lock.h
   while (cpu_atomic_bit_testset(&lock->a, 0))
     assert(deadline-- > 0);
 #else
-  cpu_atomic_bit_waitset(&lock->a, 0);
+  while (arch_lock_try(lock))
+    cpu_cycle_wait(1000);
 #endif
 }

examples/common/build_options.conf

 %section thumb
 CONFIG_CPU_ARM_THUMB
+
+%section ipi
+CONFIG_HEXO_IPI
examples/hello/hello.c
 #include <pthread.h>
 #include <mutek/printk.h>

+#define THREADS 16
+
+#define SPIN 0
+
+#if SPIN
+lock_t m;
+#else
 pthread_mutex_t m;
-pthread_t a, b;
+#endif

+pthread_t thread[THREADS];
+
 void *f(void *param)
 {
   while (1)
     {
+#if SPIN
+      lock_spin(&m);
+#else
       pthread_mutex_lock(&m);
+#endif
       printk("(%s:%i) %s", cpu_type_name(), cpu_id(), param);
+#if SPIN
+      lock_release(&m);
+#else
       pthread_mutex_unlock(&m);
+#endif
       pthread_yield();
     }
 }

 void app_start()
 {
-  pthread_mutex_init(&m, NULL);
-  pthread_create(&a, NULL, f, "Hello world\n");
-  pthread_create(&b, NULL, f, "Hello world\n");
+#if SPIN
+  lock_init(&m);
+#else
+  pthread_mutex_init(&m, NULL);
+#endif
+  size_t i;
+  for (i = 0; i < THREADS; ++i)
+    pthread_create(&thread[i], NULL, f, "Hello world\n");
 }

examples/hello/config
 # Application license
 CONFIG_LICENSE_APP_LGPL

-# Mutek features
-CONFIG_HEXO_IPI undefined
-
 # Libs
 CONFIG_PTHREAD
