Ticket #21: ipi.2.diff

File ipi.2.diff, 20.8 KB (added by Nicolas Pouillon, 14 years ago)

v2

  • mutek/include/mutek/scheduler.h

     
@@ -50,13 +50,9 @@
 CONTAINER_TYPE       (sched_queue, DLIST, struct sched_context_s
 {
   CONTAINER_ENTRY_TYPE(DLIST) list_entry;
-  sched_queue_root_t    *root;          //< keep track of associated scheduler queue
+  struct scheduler_s *scheduler;                //< keep track of associated scheduler queue
   struct context_s      context;        //< execution context
 
-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  void                  *cpu_cls;       //< used as cpu identifier for IPIs
-#endif
-
   void                  *private;
 
 #ifdef CONFIG_MUTEK_SCHEDULER_MIGRATION_AFFINITY
  • mutek/scheduler.c

     
@@ -66,86 +66,109 @@
   return next;
 }
 
-/************************************************************************/
+#if defined(CONFIG_HEXO_IPI)
+#define CONTAINER_LOCK_idle_cpu_queue HEXO_SPIN
+CONTAINER_TYPE(idle_cpu_queue, CLIST, struct ipi_endpoint_s, idle_cpu_queue_list_entry);
+CONTAINER_FUNC(idle_cpu_queue, CLIST, static inline, idle_cpu_queue, list_entry);
+#endif
 
-#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+struct scheduler_s
+{
+    sched_queue_root_t root;
+#if defined(CONFIG_HEXO_IPI) && defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    idle_cpu_queue_root_t idle_cpu;
+#elif defined(CONFIG_HEXO_IPI) && defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+    struct ipi_endpoint_s *ipi_endpoint;
+#endif
+};
 
-/* scheduler root */
-static sched_queue_root_t       CPU_NAME_DECL(sched_root);
-
-# if defined(CONFIG_HEXO_IPI)
-/* sleeping cpu list */
-
-CONTAINER_TYPE(sched_cls_queue, CLIST, struct sched_cls_item_s
+static inline struct ipi_endpoint_s *__sched_pop_ipi_endpoint(struct scheduler_s *sched)
 {
-  CONTAINER_ENTRY_TYPE(CLIST)   list_entry;
-}, list_entry);
+#if defined(CONFIG_HEXO_IPI) && defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+        return idle_cpu_queue_pop(&sched->idle_cpu);
+#elif defined(CONFIG_HEXO_IPI) && defined (CONFIG_MUTEK_SCHEDULER_STATIC)
+    return sched->ipi_endpoint;
+#endif
+    return NULL;
+}
 
-CONTAINER_FUNC(sched_cls_queue, CLIST, static inline, sched_cls_queue, list_entry);
+/************************************************************************/
 
-static sched_cls_queue_root_t cls_queue;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
 
-static CPU_LOCAL struct sched_cls_item_s sched_cls_item;
+/* scheduler root */
+static struct scheduler_s CPU_NAME_DECL(scheduler);
 
-#  define GET_CLS_FROM_ITEM(item) ((void*)((uintptr_t)(item) - (uintptr_t)&sched_cls_item))
-# endif /* IPI */
-
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return & CPU_NAME_DECL(sched_root);
+  return & CPU_NAME_DECL(scheduler);
 }
 
-static inline
-void __sched_context_push(struct sched_context_s *sched_ctx)
-{
-        sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-        struct sched_cls_item_s *idle = sched_cls_queue_pop(&cls_queue);
-        if ( idle ) {
-                ipi_post(GET_CLS_FROM_ITEM(idle));
-                sched_cls_queue_pushback(&cls_queue, idle);
-        }
-#endif /* IPI */
-}
-
-/************************************************************************/
-
 #elif defined (CONFIG_MUTEK_SCHEDULER_STATIC)
 
 /* scheduler root */
-static CPU_LOCAL sched_queue_root_t     sched_root;
+static CPU_LOCAL struct scheduler_s     scheduler;
 
 /* return scheduler root */
-static inline sched_queue_root_t *
-__sched_root(void)
+static inline struct scheduler_s *
+__scheduler_get(void)
 {
-  return CPU_LOCAL_ADDR(sched_root);
+  return CPU_LOCAL_ADDR(scheduler);
 }
 
+#endif
+
+/************************************************************************/
+
 static inline
 void __sched_context_push(struct sched_context_s *sched_ctx)
 {
-        sched_queue_pushback(sched_ctx->root, sched_ctx);
-#if defined(CONFIG_HEXO_IPI)
-        ipi_post(sched_ctx->cpu_cls);
-#endif /* IPI */
+    struct scheduler_s *sched = sched_ctx->scheduler;
+        sched_queue_pushback(&sched->root, sched_ctx);
+
+    struct ipi_endpoint_s *idle = __sched_pop_ipi_endpoint(sched);
+        if ( idle )
+                ipi_post(idle);
 }
 
+static inline void __sched_yield_cpu(struct scheduler_s *sched)
+{
+    ensure( !cpu_is_interruptible() );
+
+#if !defined(CONFIG_ARCH_SMP)
+    /* CPU sleep waiting for interrupts */
+    cpu_interrupt_wait();
+#else /* We are SMP */
+    /* do not always make CPU sleep if SMP because context may be put
+       in running queue by an other cpu with no signalling. IPI is the
+       only way to solve the "problem".
+    */
+# if defined(CONFIG_HEXO_IPI)
+#  if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    struct ipi_endpoint_s *ipi_e = CPU_LOCAL_ADDR(ipi_endpoint);
+
+    if ( ipi_endpoint_isvalid(ipi_e) ) {
+        idle_cpu_queue_pushback(&sched->idle_cpu, ipi_e);
+        cpu_interrupt_wait();
+        /* We may receive an IPI, but device IRQs are also possible,
+         * so remove us preventively */
+        idle_cpu_queue_remove(&sched->idle_cpu, ipi_e);
+    }
+#  else
+    cpu_interrupt_wait();
+#  endif
+# endif
 #endif
+}
 
-/************************************************************************/
 
 /* idle context runtime */
 static CONTEXT_ENTRY(sched_context_idle)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
 
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
-
   /* release lock acquired in previous sched_context_switch() call */
   sched_unlock();
   cpu_interrupt_disable();
@@ -154,36 +177,22 @@
     {
       struct sched_context_s    *next;
 
-      /* do not wait if several cpus are running because context may
-         be put in running queue by an other cpu with no interrupt */
-#if !defined(CONFIG_ARCH_SMP)
-      /* CPU sleep waiting for interrupts */
-      cpu_interrupt_wait();
-#elif defined(CONFIG_HEXO_IPI)
-      if (CPU_LOCAL_GET(ipi_icu_dev))
-                  cpu_interrupt_wait();
-#endif
+      __sched_yield_cpu(sched);
 
       /* Let enough time for pending interrupts to execute and assume
          memory is clobbered to force scheduler root queue
         reloading after interrupts execution. */
      cpu_interrupt_process();
 
-      sched_queue_wrlock(root);
+      sched_queue_wrlock(&sched->root);
 
-      if ((next = __sched_candidate_noidle(root)) != NULL)
+      if ((next = __sched_candidate_noidle(&sched->root)) != NULL)
         {
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_remove(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           context_switch_to(&next->context);
-#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION) && defined(CONFIG_HEXO_IPI)
-          sched_cls_queue_push(&cls_queue, CPU_LOCAL_ADDR(sched_cls_item));
-#endif
           //      printk("(c%i idle)", cpu_id());
         }
 
-      sched_queue_unlock(root);
+      sched_queue_unlock(&sched->root);
     }
 }
 
@@ -195,21 +204,21 @@
    with interrupts disabled */
 void sched_context_switch(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
 
-  if ((next = __sched_candidate_noidle(root)))
+  if ((next = __sched_candidate_noidle(&sched->root)))
     {
       /* push context back in running queue */
-      sched_queue_nolock_pushback(root, CONTEXT_LOCAL_GET(sched_cur));
+      sched_queue_nolock_pushback(&sched->root, CONTEXT_LOCAL_GET(sched_cur));
       context_switch_to(&next->context);
     }
 
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Must be called with interrupts disabled and sched locked */
@@ -220,7 +229,7 @@
   struct sched_context_s        *next;
 
   /* get next running context */
-  next = __sched_candidate(__sched_root());
+  next = __sched_candidate(&__scheduler_get()->root);
   context_jump_to(&next->context);
 }
 
@@ -228,14 +237,14 @@
 {
   assert(!cpu_is_interruptible());
 
-  sched_queue_wrlock(__sched_root());
+  sched_queue_wrlock(&__scheduler_get()->root);
 }
 
 void sched_unlock(void)
 {
   assert(!cpu_is_interruptible());
 
-  sched_queue_unlock(__sched_root());
+  sched_queue_unlock(&__scheduler_get()->root);
 }
 
 void sched_context_init(struct sched_context_s *sched_ctx)
@@ -245,12 +254,8 @@
                         sched_cur, sched_ctx);
 
   sched_ctx->private = NULL;
-  sched_ctx->root = __sched_root();
+  sched_ctx->scheduler = __scheduler_get();
 
-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC) && defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = (void*)CPU_GET_CLS();
-#endif
-
 #ifdef CONFIG_MUTEK_SCHEDULER_CANDIDATE_FCN
   sched_ctx->is_candidate = NULL;
 #endif
@@ -268,7 +273,7 @@
 void sched_wait_callback(sched_queue_root_t *queue,
                          void (*callback)(void *ctx), void *ctx)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
@@ -278,10 +283,10 @@
   callback(ctx);
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* push current context in the 'queue', unlock it and switch to next
@@ -289,7 +294,7 @@
    interrupts disabled */
 void sched_wait_unlock(sched_queue_root_t *queue)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
@@ -299,10 +304,10 @@
   sched_queue_unlock(queue);
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Switch to next context available in the 'root' queue, do not put
@@ -311,32 +316,32 @@
    disabled */
 void sched_context_stop(void)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
   /* get next running context */
-  sched_queue_wrlock(root);
-  next = __sched_candidate(root);
+  sched_queue_wrlock(&sched->root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Same as sched_context_stop but unlock given spinlock before switching */
 void sched_context_stop_unlock(lock_t *lock)
 {
-  sched_queue_root_t *root = __sched_root();
+  struct scheduler_s *sched = __scheduler_get();
   struct sched_context_s *next;
 
   assert(!cpu_is_interruptible());
 
   /* get next running context */
-  sched_queue_wrlock(root);
+  sched_queue_wrlock(&sched->root);
   lock_release(lock);
-  next = __sched_candidate(root);
+  next = __sched_candidate(&sched->root);
   context_switch_to(&next->context);
-  sched_queue_unlock(root);
+  sched_queue_unlock(&sched->root);
 }
 
 /* Must be called with interrupts disabled and queue locked */
@@ -354,11 +359,13 @@
 
 void sched_global_init(void)
 {
-#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+#if defined(CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    struct scheduler_s *sched = __scheduler_get();
+
+    sched_queue_init(&sched->root);
 # if defined(CONFIG_HEXO_IPI)
-  sched_cls_queue_init(&cls_queue);
+    idle_cpu_queue_init(&sched->idle_cpu);
 # endif
-  sched_queue_init(__sched_root());
 #endif
 }
 
@@ -378,8 +385,13 @@
 
   assert(err == 0);
 
-#if defined (CONFIG_MUTEK_SCHEDULER_STATIC)
-  sched_queue_init(__sched_root());
+#if defined(CONFIG_MUTEK_SCHEDULER_STATIC)
+    struct scheduler_s *sched = __scheduler_get();
+
+    sched_queue_init(&sched->root);
+# if defined(CONFIG_HEXO_IPI)
+    sched->ipi_endpoint = NULL;
+# endif
 #endif
 }
 
@@ -422,10 +434,7 @@
 void sched_affinity_add(struct sched_context_s *sched_ctx, cpu_id_t cpu)
 {
   void *cls = CPU_GET_CLS_ID(cpu);
-#if defined(CONFIG_HEXO_IPI)
-  sched_ctx->cpu_cls = cls;
-#endif
-  sched_ctx->root = CPU_LOCAL_CLS_ADDR(cls, sched_root);
+  sched_ctx->scheduler = CPU_LOCAL_CLS_ADDR(cls, scheduler);
 }
 
 void sched_affinity_remove(struct sched_context_s *sched_ctx, cpu_id_t cpu)
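
Note (reviewer sketch): the wake-up handshake introduced above spans several hunks, so the following is only a condensed restatement of the new __sched_context_push() / __sched_yield_cpu() pair under CONFIG_ARCH_SMP + CONFIG_MUTEK_SCHEDULER_MIGRATION + CONFIG_HEXO_IPI. The wrapper names wake_one_idle_cpu and park_current_cpu are illustrative only and are not part of the patch.

    /* Producer side, condensed from __sched_context_push(): a context becomes
       runnable, so signal one parked processor if there is any. */
    static inline void wake_one_idle_cpu(struct scheduler_s *sched,
                                         struct sched_context_s *sched_ctx)
    {
        sched_queue_pushback(&sched->root, sched_ctx);   /* publish the context */
        struct ipi_endpoint_s *idle = __sched_pop_ipi_endpoint(sched);
        if ( idle )
            ipi_post(idle);        /* kick that CPU out of cpu_interrupt_wait() */
    }

    /* Consumer side, condensed from __sched_yield_cpu(): the idle loop
       advertises its endpoint before sleeping and withdraws it afterwards,
       since the wake-up may come from a device IRQ rather than an IPI. */
    static inline void park_current_cpu(struct scheduler_s *sched)
    {
        struct ipi_endpoint_s *self = CPU_LOCAL_ADDR(ipi_endpoint);

        if ( ipi_endpoint_isvalid(self) ) {
            idle_cpu_queue_pushback(&sched->idle_cpu, self);
            cpu_interrupt_wait();
            idle_cpu_queue_remove(&sched->idle_cpu, self);
        }
    }
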
  • mutek/fdt_consumer.c

     
@@ -119,12 +119,10 @@
                 priv->state = IN_CPUS;
 #if defined(CONFIG_HEXO_IPI)
                 if ( priv->ipi_dev && priv->ipi_dev->drv ) {
-                        dprintk("Preparing ipi dev\n");
-                        void *foo = dev_icu_setupipi(priv->ipi_dev, priv->ipi_no);
-                        dprintk("  CPU %d using %p:%d as ipi device, cls=%p, priv=%p\n",
-                                   priv->cpuid, priv->ipi_dev, priv->ipi_no,
-                                   cpu_local_storage[priv->cpuid], foo);
-                        ipi_hook_cpu(cpu_local_storage[priv->cpuid], priv->ipi_dev, foo);
+            ipi_hook_endpoint(
+                CPU_LOCAL_CLS_ADDR(cpu_local_storage[priv->cpuid], ipi_endpoint),
+                priv->ipi_dev,
+                priv->ipi_no);
                 } else {
                         dprintk("  No IPI dev for CPU %d\n", priv->cpuid);
                 }
  • hexo/ipi.c

     
@@ -25,43 +25,44 @@
 #include <device/device.h>
 #include <hexo/ipi.h>
 
-static CPU_LOCAL ipi_queue_root_t ipi_fifo = CONTAINER_ROOT_INITIALIZER(ipi_queue, DLIST);
-CPU_LOCAL struct device_s *ipi_icu_dev = 0;
-CPU_LOCAL void *ipi_cpu_id;
+CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
 
-error_t ipi_post(void *cpu_cls)
+CPU_LOCAL struct ipi_endpoint_s ipi_endpoint = {};
+
+error_t ipi_post(struct ipi_endpoint_s *endpoint)
 {
-  struct device_s *icu = *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
+    struct device_s *icu = endpoint->icu_dev;
 
-  if (!icu)
-    return -EOPNOTSUPP;
+    if (!icu)
+        return -EOPNOTSUPP;
 
-  return dev_icu_sendipi(icu, *CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id));
+    return dev_icu_sendipi(icu, endpoint->priv);
 }
 
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq)
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq)
 {
-  if (ipi_queue_pushback(CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_fifo), rq))
-    return ipi_post(cpu_cls);
+    if (ipi_queue_pushback(&endpoint->ipi_fifo, rq))
+        return ipi_post(endpoint);
 
-  return -ENOMEM;
+    return -ENOMEM;
 }
 
 void ipi_process_rq()
 {
-  struct ipi_request_s *rq;
+    struct ipi_request_s *rq;
+    ipi_queue_root_t *fifo = &(CPU_LOCAL_ADDR(ipi_endpoint)->ipi_fifo);
 
-  while ((rq = ipi_queue_pop(CPU_LOCAL_ADDR(ipi_fifo))))
-    rq->func(rq->private);
+    while ((rq = ipi_queue_pop(fifo)))
+        rq->func(rq->private);
 }
 
-void ipi_hook_cpu(void *cpu_cls,
-                                  struct device_s *ipi_icudev,
-                                  void *privdata)
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no)
 {
-        struct device_s **icu = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_icu_dev);
-        void ** priv = CPU_LOCAL_CLS_ADDR(cpu_cls, ipi_cpu_id);
+    void *foo = dev_icu_setupipi(ipi_dev, ipi_no);
+    endpoint->icu_dev = ipi_dev;
+    endpoint->priv = foo;
 
-        *icu = ipi_icudev;
-        *priv = privdata;
+    return 0;
 }
  • hexo/include/hexo/ipi.h

     
@@ -40,9 +40,6 @@
 
 typedef IPI_MSG_FUNC(ipi_msg_func_t);
 
-extern CPU_LOCAL struct device_s *ipi_icu_dev;
-extern CPU_LOCAL void *ipi_cpu_id;
-
 #define CONTAINER_LOCK_ipi_queue HEXO_SPIN
 
 CONTAINER_TYPE(ipi_queue, DLIST,
@@ -53,41 +50,65 @@
   ipi_queue_entry_t queue_entry;
 }, queue_entry);
 
-CONTAINER_FUNC(ipi_queue, DLIST, static inline, ipi_queue);
+struct ipi_endpoint_s
+{
+    struct device_s *icu_dev;
+    void *priv;
+    ipi_queue_root_t ipi_fifo;
+#if defined (CONFIG_MUTEK_SCHEDULER_MIGRATION)
+    CONTAINER_ENTRY_TYPE(CLIST) idle_cpu_queue_list_entry;
+#endif
+};
 
+extern CPU_LOCAL struct ipi_endpoint_s ipi_endpoint;
+
+
 /**
-   Send an ipi to given processor. Processor is identified using its
-   cpu local storage pointer.
+   Send an ipi to given endpoint.
+
+   @param endpoint Pointer to ipi endpoint
   @return zero if ipi was sent
-   @see #CPU_LOCAL_ID_ADDR
 */
-error_t ipi_post(void *cpu_cls);
+error_t ipi_post(struct ipi_endpoint_s *endpoint);
 
 /**
    Attach the given callback for execution on target processor and
-   send an ipi to given processor on success  Processor is identified using its
-   cpu local storage pointer.
+   send an ipi to given endpoint.
 
+   @param endpoint Pointer to ipi endpoint
+   @param rq Request buffer
   @return zero if message was attached and ipi sent
   @see #CPU_LOCAL_ID_ADDR
 */
-error_t ipi_post_rq(void *cpu_cls, struct ipi_request_s *rq);
+error_t ipi_post_rq(struct ipi_endpoint_s *endpoint, struct ipi_request_s *rq);
 
 /**
-   Request processing of pending messages on current processor. Called from icu driver
+   Request processing of pending messages on current processor. Must
+   be called from icu driver
 */
 void ipi_process_rq();
 
 /**
-   Setup a IPI device for a given CPU.
+   Setup a IPI device for a given endpoint.
 
-   @param cpu_cls CPU's cls to hook up in
-   @param ipi_icudev Icudev handling the IPIs
-   @param privdata Icudev private data returned by @ref dev_icu_setupipi
+   @param endpoint IPI endpoint to set up
+   @param ipi_dev ICU device handling the IPI
+   @param ipi_no IPI number in ICU device @tt ipi_dev
 */
-void ipi_hook_cpu(void *cpu_cls,
-                                  struct device_s *ipi_icudev,
-                                  void *privdata);
+error_t ipi_hook_endpoint(struct ipi_endpoint_s *endpoint,
+                          struct device_s *ipi_dev,
+                          uint_fast8_t ipi_no);
 
+/**
+   Checks whether a given endpoint may receive IPIs.
+
+   @param endpoint IPI endpoint to check
+   @return whether endpoint may receive IPIs
+*/
+static inline
+bool_t ipi_endpoint_isvalid(struct ipi_endpoint_s *endpoint)
+{
+    return endpoint != NULL && endpoint->icu_dev != NULL;
+}
+
 #endif
-
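
Note (usage sketch): a possible call sequence for the reworked endpoint API is shown below. The names demo_ipi_handler, demo_rq, demo_ipi_usage, my_icu and ipi_no are placeholders, and the ipi_request_s field names are taken from the ipi_process_rq() implementation above; in the patch itself the hook-up is performed from mutek/fdt_consumer.c, which targets a remote CPU's endpoint through CPU_LOCAL_CLS_ADDR.

    #include <hexo/ipi.h>
    #include <device/device.h>

    /* Callback run by ipi_process_rq() on the processor owning the endpoint. */
    static IPI_MSG_FUNC(demo_ipi_handler)
    {
        /* react to the notification here */
    }

    static struct ipi_request_s demo_rq = {
        .func = demo_ipi_handler,
        .private = NULL,
    };

    static void demo_ipi_usage(struct device_s *my_icu, uint_fast8_t ipi_no)
    {
        /* per-CPU endpoint instance declared in hexo/ipi.c */
        struct ipi_endpoint_s *ep = CPU_LOCAL_ADDR(ipi_endpoint);

        /* bind the endpoint to its ICU device and IPI line */
        ipi_hook_endpoint(ep, my_icu, ipi_no);

        if ( ipi_endpoint_isvalid(ep) ) {
            ipi_post(ep);               /* bare wake-up IPI */
            ipi_post_rq(ep, &demo_rq);  /* queue a request, then raise the IPI */
        }
    }
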
  • hexo/include/hexo/cpu.h

     
@@ -62,6 +62,14 @@
 
 cpu_cycle_t cpu_cycle_count(void);
 
+static inline
+void cpu_cycle_wait(cpu_cycle_t delta)
+{
+    delta += cpu_cycle_count();
+    while ( cpu_cycle_count() < delta )
+        ;
+}
+
 /** cpu trap instruction */
 void cpu_trap();
 
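
Note: cpu_cycle_wait() added above is a plain busy-wait on the cycle counter; the soclib lock change in the next file uses it as a back-off between lock attempts. A minimal illustration of the same pattern (wait_for_flag is hypothetical, and the 1000-cycle figure simply mirrors the lock.h hunk below):

    #include <hexo/cpu.h>

    /* Poll a shared flag without hammering the interconnect on every iteration. */
    static void wait_for_flag(volatile bool_t *flag)
    {
        while ( !*flag )
            cpu_cycle_wait(1000);   /* burn roughly 1000 cycles between polls */
    }
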
  • arch/soclib/include/arch/hexo/lock.h

     
@@ -137,7 +137,8 @@
   while (cpu_atomic_bit_testset(&lock->a, 0))
     assert(deadline-- > 0);
 #else
-  cpu_atomic_bit_waitset(&lock->a, 0);
+  while (arch_lock_try(lock))
+      cpu_cycle_wait(1000);
 #endif
 }
 
  • examples/common/build_options.conf

     
@@ -27,3 +27,6 @@
 
 %section thumb
   CONFIG_CPU_ARM_THUMB
+
+%section ipi
+  CONFIG_HEXO_IPI
  • examples/hello/hello.c

     
@@ -2,24 +2,46 @@
 #include <pthread.h>
 #include <mutek/printk.h>
 
+#define THREADS 16
+
+#define SPIN 0
+
+#if SPIN
+lock_t m;
+#else
 pthread_mutex_t m;
-pthread_t a, b;
+#endif
 
+pthread_t thread[THREADS];
+
 void *f(void *param)
 {
   while (1)
     {
+#if SPIN
+      lock_spin(&m);
+#else
       pthread_mutex_lock(&m);
+#endif
      printk("(%s:%i) %s", cpu_type_name(), cpu_id(), param);
+#if SPIN
+      lock_release(&m);
+#else
      pthread_mutex_unlock(&m);
+#endif
      pthread_yield();
    }
 }
 
 void app_start()
 {
-  pthread_mutex_init(&m, NULL);
-  pthread_create(&a, NULL, f, "Hello world\n");
-  pthread_create(&b, NULL, f, "Hello world\n");
+#if SPIN
+      lock_init(&m);
+#else
+      pthread_mutex_init(&m, NULL);
+#endif
+  size_t i;
+  for (i = 0; i < THREADS; ++i)
+      pthread_create(&thread[i], NULL, f, "Hello world\n");
 }
 
  • examples/hello/config

     
@@ -7,9 +7,6 @@
   # Application license
   CONFIG_LICENSE_APP_LGPL
 
-  # Mutek features
-  CONFIG_HEXO_IPI undefined
-
   # Libs
   CONFIG_PTHREAD
 