Ticket #21: ipi.diff

File ipi.diff, 18.3 KB (added by Nicolas Pouillon, 15 years ago)

Patch

  • mutek/include/mutek/scheduler.h

     
@@ -50,13 +50,9 @@
 CONTAINER_TYPE       (sched_queue, DLIST, struct sched_context_s
 {
   CONTAINER_ENTRY_TYPE(DLIST) list_entry;
-  sched_queue_root_t    *root;          //< keep track of associated scheduler queue
+  struct scheduler_s    *scheduler;     //< keep track of associated scheduler
   struct context_s      context;        //< execution context

-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  void                  *cpu_cls;       //< used as cpu identifier for IPIs
-#endif
-
   void                  *private;

 #ifdef CONFIG_MUTEK_SCHEDULER_MIGRATION_AFFINITY
  • mutek/scheduler.c

     
@@ -66,86 +66,79 @@
   return next;
 }

+#if defined(CONFIG_HEXO_IPI)
+#define CONTAINER_LOCK_idle_cpu_queue HEXO_SPIN
+CONTAINER_TYPE(idle_cpu_queue, CLIST, struct ipi_endpoint_s, idle_cpu_queue_list_entry);
+CONTAINER_FUNC(idle_cpu_queue, CLIST, static inline, idle_cpu_queue, list_entry);
+#endif
+
+struct scheduler_s
+{
+  sched_queue_root_t root;
+#if defined(CONFIG_HEXO_IPI)
+# if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  idle_cpu_queue_root_t idle_cpu;
+# elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+  struct ipi_endpoint_s *ipi_endpoint;
+# endif
+#endif
+};
+
 /************************************************************************/

 #if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)

 /* scheduler root */
-static sched_queue_root_t       CPU_NAME_DECL(sched_root);
+static struct scheduler_s CPU_NAME_DECL(scheduler);

-# if defined(CONFIG_HEXO_IPI)
-/* sleeping cpu list */
-
-CONTAINER_TYPE(sched_cls_queue, CLIST, struct sched_cls_item_s
-{
-  CONTAINER_ENTRY_TYPE(CLIST)   list_entry;
-}, list_entry);
-
-CONTAINER_FUNC(sched_cls_queue, CLIST, static inline, sched_cls_queue, list_entry);
-
-static sched_cls_queue_root_t cls_queue;
-
-static CPU_LOCAL struct sched_cls_item_s sched_cls_item;
-
-#  define GET_CLS_FROM_ITEM(item) ((void*)((uintptr_t)(item) - (uintptr_t)&sched_cls_item))
-# endif /* IPI */
-
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return & CPU_NAME_DECL(sched_root);
+  return & CPU_NAME_DECL(scheduler);
 }

-static inline
-void __sched_context_push(struct sched_context_s *sched_ctx)
-{
-        sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-        struct sched_cls_item_s *idle = sched_cls_queue_pop(&cls_queue);
-        if ( idle ) {
-                ipi_post(GET_CLS_FROM_ITEM(idle));
-                sched_cls_queue_pushback(&cls_queue, idle);
-        }
-#endif /* IPI */
-}
-
 /************************************************************************/

 #elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)

 /* scheduler root */
-static CPU_LOCAL sched_queue_root_t     sched_root;
+static CPU_LOCAL struct scheduler_s     scheduler;

 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return CPU_LOCAL_ADDR(sched_root);
+  return CPU_LOCAL_ADDR(scheduler);
 }

+#endif
+
 static inline
 void __sched_context_push(struct sched_context_s *sched_ctx)
 {
-        sched_queue_pushback(sched_ctx->root, sched_ctx);
+  struct scheduler_s *sched = sched_ctx->scheduler;
+  sched_queue_pushback(&sched->root, sched_ctx);
 #if defined(CONFIG_HEXO_IPI)
-        ipi_post(sched_ctx->cpu_cls);
+# if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  struct ipi_endpoint_s *idle = idle_cpu_queue_pop(&sched->idle_cpu);
+  if ( idle ) {
+    ipi_post(idle);
+    /* Push the endpoint back so the CPU stays wakeable, we may have a race otherwise */
+    idle_cpu_queue_pushback(&sched->idle_cpu, idle);
+  }
+# elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+  ipi_post(sched->ipi_endpoint);
+# endif
 #endif /* IPI */
 }

-#endif
-
 /************************************************************************/

 /* idle context runtime */
 static CONTEXT_ENTRY(sched_context_idle)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();

-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
-
   /* release lock acquired in previous sched_context_switch() call */
   sched_unlock();
   cpu_interrupt_disable();
     
@@ -160,8 +153,13 @@
       /* CPU sleep waiting for interrupts */
       cpu_interrupt_wait();
 #elif defined(CONFIG_HEXO_IPI)
-      if (CPU_LOCAL_GET(ipi_icu_dev))
+      if (ipi_endpoint_isvalid(CPU_LOCAL_ADDR(ipi_endpoint))) {
+# if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+          idle_cpu_queue_push(&sched->idle_cpu, CPU_LOCAL_ADDR(ipi_endpoint));
                   cpu_interrupt_wait();
+          idle_cpu_queue_remove(&sched->idle_cpu, CPU_LOCAL_ADDR(ipi_endpoint));
+# endif
+      }
 #endif

       /* Let enough time for pending interrupts to execute and assume
     
@@ -169,21 +167,15 @@
          reloading after interrupts execution. */
       cpu_interrupt_process();

-      sched_queue_wrlock(root);
+      sched_queue_wrlock(&sched->root);

-      if ((next = __sched_candidate_noidle(root)) != NULL)
+      if ((next = __sched_candidate_noidle(&sched->root)) != NULL)
         {
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_remove(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           context_switch_to(&next->context);
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           //      printk("(c%i idle)", cpu_id());
         }

-      sched_queue_unlock(root);
+      sched_queue_unlock(&sched->root);
     }
 }

     
@@ -195,21 +187,21 @@
    with interrupts disabled */
 void sched_context_switch(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);

-  if ((next = __sched_candidate_noidle(root)))
+  if ((next = __sched_candidate_noidle(&sched->root)))
     {
       /* push context back in running queue */
-      sched_queue_nolock_pushback(root, CONTEXT_LOCAL_GET(sched_cur));
+      sched_queue_nolock_pushback(&sched->root, CONTEXT_LOCAL_GET(sched_cur));
       context_switch_to(&next->context);
     }

-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Must be called with interrupts disabled and sched locked */
     
@@ -220,7 +212,7 @@
   struct sched_context_s        *next;

   /* get next running context */
-  next = __sched_candidate(__sched_root());
+  next = __sched_candidate(&__scheduler_get()->root);
   context_jump_to(&next->context);
 }

     
@@ -228,14 +220,14 @@
 {
   assert(!cpu_is_interruptible());

-  sched_queue_wrlock(__sched_root());
+  sched_queue_wrlock(&__scheduler_get()->root);
 }

 void sched_unlock(void)
 {
   assert(!cpu_is_interruptible());

-  sched_queue_unlock(__sched_root());
+  sched_queue_unlock(&__scheduler_get()->root);
 }

 void sched_context_init(struct sched_context_s *sched_ctx)
     
@@ -245,12 +237,8 @@
                         sched_cur, sched_ctx);

   sched_ctx->private = NULL;
-  sched_ctx->root = __sched_root();
+  sched_ctx->scheduler = __scheduler_get();

-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = (void*)CPU_GET_CLS();
-#endif
-
 #ifdef CONFIG_MUTEK_SCHEDULER_CANDIDATE_FCN
   sched_ctx->is_candidate = NULL;
 #endif
     
@@ -268,7 +256,7 @@
 void sched_wait_callback(sched_queue_root_t *queue,
                          void (*callback)(void *ctx), void *ctx)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());
     
@@ -278,10 +266,10 @@
   callback(ctx);

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* push current context in the 'queue', unlock it and switch to next
     
@@ -289,7 +277,7 @@
    interrupts disabled */
 void sched_wait_unlock(sched_queue_root_t *queue)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());
     
@@ -299,10 +287,10 @@
   sched_queue_unlock(queue);

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Switch to next context available in the 'root' queue, do not put
     
@@ -311,32 +299,32 @@
    disabled */
 void sched_context_stop(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Same as sched_context_stop but unlock given spinlock before switching */
 void sched_context_stop_unlock(lock_t *lock)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;

   assert(!cpu_is_interruptible());

   /* get next running context */
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
   lock_release(lock);
-  next = __sched_candidate(root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }

 /* Must be called with interrupts disabled and queue locked */
     
@@ -352,13 +340,22 @@
   return sched_ctx;
 }

+static void __sched_init(struct scheduler_s *sched)
+{
+  sched_queue_init(&sched->root);
+#if defined(CONFIG_HEXO_IPI)
+# if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+  idle_cpu_queue_init(&sched->idle_cpu);
+# elif defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+  sched->ipi_endpoint = NULL;
+# endif
+#endif
+}
+
 void sched_global_init(void)
 {
 #if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
-# if defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_init(&cls_queue);
-# endif
-  sched_queue_init(__sched_root());
+  __sched_init(__scheduler_get());
 #endif
 }

     
@@ -379,7 +376,7 @@
   assert(err == 0);

 #if defined (CONFIG_MUTEK_SCHEDULER_STATIC)
-  sched_queue_init(__sched_root());
+  __sched_init(__scheduler_get());
 #endif
 }

     
@@ -422,10 +419,7 @@
 void sched_affinity_add(struct sched_context_s *sched_ctx, cpu_id_t cpu)
 {
   void *cls = CPU_GET_CLS_ID(cpu);
-#if defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = cls;
-#endif
-  sched_ctx->root = CPU_LOCAL_CLS_ADDR(cls, sched_root);
+  sched_ctx->scheduler = CPU_LOCAL_CLS_ADDR(cls, scheduler);
 }

 void sched_affinity_remove(struct sched_context_s *sched_ctx, cpu_id_t cpu)
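
For review convenience, the idle-CPU wakeup handshake this patch introduces under CONFIG_MUTEK_SCHEDULER_MIGRATION with CONFIG_HEXO_IPI can be condensed as the sketch below. It is not part of ipi.diff and the helper names are illustrative only:

/* Sketch only, not part of ipi.diff. */

/* Idle side (see sched_context_idle): advertise this CPU, then sleep. */
static void idle_wait_sketch(struct scheduler_s *sched)
{
  struct ipi_endpoint_s *self = CPU_LOCAL_ADDR(ipi_endpoint);

  idle_cpu_queue_push(&sched->idle_cpu, self);    /* now reachable by IPI */
  cpu_interrupt_wait();                           /* sleep until an interrupt, possibly the IPI */
  idle_cpu_queue_remove(&sched->idle_cpu, self);  /* stop advertising before rescheduling */
}

/* Wakeup side (see __sched_context_push): kick one sleeping CPU. */
static void wake_idle_sketch(struct scheduler_s *sched)
{
  struct ipi_endpoint_s *idle = idle_cpu_queue_pop(&sched->idle_cpu);

  if (idle) {
    ipi_post(idle);
    /* push back so the CPU stays reachable until it removes itself */
    idle_cpu_queue_pushback(&sched->idle_cpu, idle);
  }
}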
  • mutek/fdt_consumer.c

     
@@ -119,12 +119,10 @@
                 priv->state = IN_CPUS;
 #if defined(CONFIG_HEXO_IPI)
                 if ( priv->ipi_dev && priv->ipi_dev->drv ) {
-                        dprintk("Preparing ipi dev\n");
-                        void *foo = dev_icu_setupipi(priv->ipi_dev, priv->ipi_no);
-                        dprintk("  CPU %d using %p:%d as ipi device, cls=%p, priv=%p\n",
-                                   priv->cpuid, priv->ipi_dev, priv->ipi_no,
-                                   cpu_local_storage[priv->cpuid], foo);
-                        ipi_hook_cpu(cpu_local_storage[priv->cpuid], priv->ipi_dev, foo);
+                        ipi_hook_endpoint(
+                                CPU_LOCAL_CLS_ADDR(cpu_local_storage[priv->cpuid], ipi_endpoint),
+                                priv->ipi_dev,
+                                priv->ipi_no);
                 } else {
                         dprintk("  No IPI dev for CPU %d\n", priv->cpuid);
                 }
  • hexo/ipi.c

     
@@ -25,43 +25,44 @@
 #include <device/device.h>
 #include <hexo/ipi.h>

-static CPU_LOCAL ipi_queue_root_t ipi_fifo = CONTAINER_ROOT_INITIALIZER(ipi_queue, DLIST);
-CPU_LOCAL struct device_s *ipi_icu_dev = 0;
-CPU_LOCAL void *ipi_cpu_id;
+CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);

-error_t ipi_post(void *cpu_cls)
+CPU_LOCAL struct ipi_endpoint_s ipi_endpoint = {};
+
+error_t ipi_post(struct ipi_endpoint_s *endpoint)
 {
-  struct device_s *icu = *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
+    struct device_s *icu = endpoint->icu_dev;

-  if (!icu)
-    return -EOPNOTSUPP;
+    if (!icu)
+        return -EOPNOTSUPP;

-  return dev_icu_sendipi(icu, *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id));
+    return dev_icu_sendipi(icu, endpoint->priv);
 }

-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq)
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq)
 {
-  if (ipi_queue_pushback(CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_fifo), rq))
-    return ipi_post(cpu_cls);
+    if (ipi_queue_pushback(&endpoint->ipi_fifo, rq))
+        return ipi_post(endpoint);

-  return -ENOMEM;
+    return -ENOMEM;
 }

 void ipi_process_rq()
 {
-  struct ipi_request_s *rq;
+    struct ipi_request_s *rq;
+    ipi_queue_root_t *fifo = &(CPU_LOCAL_ADDR(ipi_endpoint)->ipi_fifo);

-  while ((rq = ipi_queue_pop(CPU_LOCAL_ADDR(ipi_fifo))))
-    rq->func(rq->private);
+    while ((rq = ipi_queue_pop(fifo)))
+        rq->func(rq->private);
 }

-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata)
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no)
 {
-        struct device_s **icu = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
-        void ** priv = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id);
+    void *foo = dev_icu_setupipi(ipi_dev, ipi_no);
+    endpoint->icu_dev = ipi_dev;
+    endpoint->priv = foo;

-        *icu = ipi_icudev;
-        *priv = privdata;
+    return 0;
 }
  • hexo/include/hexo/ipi.h

     
@@ -40,9 +40,6 @@

 typedef IPI_MSG_FUNC(ipi_msg_func_t);

-extern CPU_LOCAL struct device_s *ipi_icu_dev;
-extern CPU_LOCAL void *ipi_cpu_id;
-
 #define CONTAINER_LOCK_ipi_queue HEXO_SPIN

 CONTAINER_TYPE(ipi_queue, DLIST,
     
@@ -53,41 +50,65 @@
   ipi_queue_entry_t queue_entry;
 }, queue_entry);

-CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
+struct ipi_endpoint_s
+{
+    struct device_s *icu_dev;
+    void *priv;
+    ipi_queue_root_t ipi_fifo;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    CONTAINER_ENTRY_TYPE(CLIST) idle_cpu_queue_list_entry;
+#endif
+};

+extern CPU_LOCAL struct ipi_endpoint_s ipi_endpoint;
+
+
 /**
-   Send an ipi to given processor. Processor is identified using its
-   cpu local storage pointer.
+   Send an IPI to the given endpoint.
+
+   @param endpoint Pointer to the IPI endpoint
    @return zero if ipi was sent
-   @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post(void *cpu_cls);
+error_t ipi_post(struct ipi_endpoint_s *endpoint);

 /**
    Attach the given callback for execution on target processor and
-   send an ipi to given processor on success  Processor is identified using its
-   cpu local storage pointer.
+   send an IPI to the given endpoint.

+   @param endpoint Pointer to the IPI endpoint
+   @param rq Request buffer
    @return zero if message was attached and ipi sent
    @see #CPU_LOCAL_ID_ADDR
  */
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq);
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq);

 /**
-   Request processing of pending messages on current processor. Called from icu driver
+   Request processing of pending messages on the current processor.
+   Must be called from the ICU driver.
  */
 void ipi_process_rq();

 /**
-   Setup a IPI device for a given CPU.
+   Set up an IPI device for a given endpoint.

-   @param cpu_cls CPU's cls to hook up in
-   @param ipi_icudev Icudev handling the IPIs
-   @param privdata Icudev private data returned by @ref dev_icu_setupipi
+   @param endpoint IPI endpoint to set up
+   @param ipi_dev ICU device handling the IPI
+   @param ipi_no IPI number in ICU device @tt ipi_dev
  */
-void ipi_hook_cpu(void *cpu_cls,
-                  struct device_s *ipi_icudev,
-                  void *privdata);
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no);

+/**
+   Check whether a given endpoint may receive IPIs.
+
+   @param endpoint IPI endpoint to check
+   @return whether the endpoint may receive IPIs
+ */
+static inline
+bool_t ipi_endpoint_isvalid(struct ipi_endpoint_s *endpoint)
+{
+    return endpoint->icu_dev != NULL;
+}
+
 #endif
-
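
To illustrate the new interface, here is a minimal usage sketch (not part of ipi.diff) based on the fdt_consumer.c hunk above; the cls pointer, ICU device and IPI number are placeholders:

/* Sketch only: bind a CPU's per-cpu IPI endpoint, then post an IPI to it. */
#include <hexo/ipi.h>

void ipi_usage_sketch(void *target_cls, struct device_s *icu_dev, uint_fast8_t ipi_no)
{
    /* Per-cpu endpoint of the target CPU, looked up through its cls */
    struct ipi_endpoint_s *ep = CPU_LOCAL_CLS_ADDR(target_cls, ipi_endpoint);

    /* Bind the endpoint to its ICU device once, typically at boot */
    ipi_hook_endpoint(ep, icu_dev, ipi_no);

    /* Later: wake that CPU, provided its endpoint was set up */
    if (ipi_endpoint_isvalid(ep))
        ipi_post(ep);
}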
  • examples/common/build_options.conf

     
@@ -27,3 +27,6 @@

 %section thumb
   CONFIG_CPU_ARM_THUMB
+
+%section ipi
+  CONFIG_HEXO_IPI
  • examples/hello/hello.c

     
@@ -2,8 +2,10 @@
 #include <pthread.h>
 #include <mutek/printk.h>

+#define THREADS 4
+
 pthread_mutex_t m;
-pthread_t a, b;
+pthread_t thread[THREADS];

 void *f(void *param)
 {
     
@@ -19,7 +21,9 @@
 void app_start()
 {
   pthread_mutex_init(&m, NULL);
-  pthread_create(&a, NULL, f, "Hello world\n");
-  pthread_create(&b, NULL, f, "Hello world\n");
+  size_t i;
+  for (i = 0; i < THREADS; ++i) {
+    pthread_create(&thread[i], NULL, f, "Hello world\n");
+  }
 }

  • examples/hello/config

     
@@ -7,9 +7,6 @@
   # Application license
   CONFIG_LICENSE_APP_LGPL

-  # Mutek features
-  CONFIG_HEXO_IPI undefined
-
   # Libs
   CONFIG_PTHREAD
