Lines Matching full:spu

2 /* sched.c - SPU scheduler.
34 #include <asm/spu.h>
60 * Frequency of the spu scheduler tick. By default we do one SPU scheduler
68 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
133 /* Save the current cpu id for spu interrupt routing. */ in __spu_update_sched_info()
142 node = ctx->spu->node; in spu_update_sched_info()
186 struct spu *spu; in do_notify_spus_active() local
189 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in do_notify_spus_active()
190 if (spu->alloc_state != SPU_FREE) { in do_notify_spus_active()
191 struct spu_context *ctx = spu->ctx; in do_notify_spus_active()
203 * spu_bind_context - bind spu context to physical spu
204 * @spu: physical spu to bind to
207 static void spu_bind_context(struct spu *spu, struct spu_context *ctx) in spu_bind_context() argument
209 spu_context_trace(spu_bind_context__enter, ctx, spu); in spu_bind_context()
214 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); in spu_bind_context()
216 ctx->stats.slb_flt_base = spu->stats.slb_flt; in spu_bind_context()
217 ctx->stats.class2_intr_base = spu->stats.class2_intr; in spu_bind_context()
219 spu_associate_mm(spu, ctx->owner); in spu_bind_context()
221 spin_lock_irq(&spu->register_lock); in spu_bind_context()
222 spu->ctx = ctx; in spu_bind_context()
223 spu->flags = 0; in spu_bind_context()
224 ctx->spu = spu; in spu_bind_context()
226 spu->pid = current->pid; in spu_bind_context()
227 spu->tgid = current->tgid; in spu_bind_context()
228 spu->ibox_callback = spufs_ibox_callback; in spu_bind_context()
229 spu->wbox_callback = spufs_wbox_callback; in spu_bind_context()
230 spu->stop_callback = spufs_stop_callback; in spu_bind_context()
231 spu->mfc_callback = spufs_mfc_callback; in spu_bind_context()
232 spin_unlock_irq(&spu->register_lock); in spu_bind_context()
236 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); in spu_bind_context()
237 spu_restore(&ctx->csa, spu); in spu_bind_context()
238 spu->timestamp = jiffies; in spu_bind_context()
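
Read together, the spu_bind_context() fragments show the bind sequence: under spu->register_lock the context and the physical SPU are pointed at each other, the owning task's pid/tgid and the ibox/wbox/stop/MFC callbacks are installed, the saved context image is restored with spu_restore(), and a timestamp is taken. A minimal standalone sketch of that pairing step, using simplified stand-in types (fake_spu, fake_ctx) rather than the kernel's structures:

#include <stdio.h>
#include <time.h>

/* Simplified illustration only; not the kernel's struct spu / spu_context. */
struct fake_ctx;

struct fake_spu {
	int number;
	struct fake_ctx *ctx;		/* context currently bound, if any */
	int pid;			/* owner recorded at bind time */
	void (*stop_callback)(struct fake_spu *);
	time_t timestamp;
};

struct fake_ctx {
	struct fake_spu *spu;		/* back-pointer to the physical SPU */
	const char *name;
};

static void fake_stop(struct fake_spu *spu)
{
	printf("stop event on SPU %d\n", spu->number);
}

/*
 * Mirror of the bind idea: make the pairing visible from both sides and
 * install the callback the hardware-event path will later use.
 */
static void bind_context(struct fake_spu *spu, struct fake_ctx *ctx, int pid)
{
	spu->ctx = ctx;
	ctx->spu = spu;
	spu->pid = pid;
	spu->stop_callback = fake_stop;
	spu->timestamp = time(NULL);
}

int main(void)
{
	struct fake_spu spu = { .number = 3 };
	struct fake_ctx ctx = { .name = "demo" };

	bind_context(&spu, &ctx, 1234);
	printf("ctx %s now runs on SPU %d\n", ctx.name, ctx.spu->number);
	return 0;
}
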
247 static inline int sched_spu(struct spu *spu) in sched_spu() argument
249 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); in sched_spu()
251 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); in sched_spu()
288 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, in aff_ref_location()
291 struct spu *spu; in aff_ref_location() local
295 * TODO: A better algorithm could be used to find a good spu to be in aff_ref_location()
317 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
318 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset in aff_ref_location()
319 && spu->ctx->gang->aff_ref_spu) in aff_ref_location()
320 available_spus -= spu->ctx->gang->contexts; in aff_ref_location()
328 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
329 if ((!mem_aff || spu->has_mem_affinity) && in aff_ref_location()
330 sched_spu(spu)) { in aff_ref_location()
332 return spu; in aff_ref_location()
363 static struct spu *ctx_location(struct spu *ref, int offset, int node) in ctx_location()
365 struct spu *spu; in ctx_location() local
367 spu = NULL; in ctx_location()
369 list_for_each_entry(spu, ref->aff_list.prev, aff_list) { in ctx_location()
370 BUG_ON(spu->node != node); in ctx_location()
373 if (sched_spu(spu)) in ctx_location()
377 list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) { in ctx_location()
378 BUG_ON(spu->node != node); in ctx_location()
381 if (sched_spu(spu)) in ctx_location()
386 return spu; in ctx_location()
391 * It returns the spu ptr on which the context must run.
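
The affinity fragments above describe placement relative to a gang's reference SPU: aff_ref_location() picks the reference, and ctx_location() then walks the affinity-ordered list by the context's aff_offset, counting only SPUs that sched_spu() would accept. A rough standalone sketch of that walk over a plain array, with hypothetical names in place of the kernel's list_for_each_entry() traversal:

#include <stdio.h>

/* One slot in an affinity-ordered row of SPUs (illustrative only). */
struct slot {
	int id;
	int schedulable;	/* stands in for sched_spu(spu) */
};

/*
 * Step from the reference slot by "offset" positions, skipping slots that
 * cannot be scheduled, in the spirit of ctx_location().
 */
static struct slot *slot_at_offset(struct slot *row, int n, int ref, int offset)
{
	int step = offset >= 0 ? 1 : -1;
	int i = ref;

	while (offset != 0) {
		i += step;
		if (i < 0 || i >= n)
			return NULL;	/* ran off the affinity row */
		if (row[i].schedulable)
			offset -= step;
	}
	return &row[i];
}

int main(void)
{
	struct slot row[] = {
		{ 0, 1 }, { 1, 0 }, { 2, 1 }, { 3, 1 }, { 4, 1 },
	};
	/* two schedulable slots to the right of slot 2 */
	struct slot *s = slot_at_offset(row, 5, 2, 2);

	if (s)
		printf("offset 2 from slot 2 lands on slot %d\n", s->id);
	return 0;
}
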
415 * spu_unbind_context - unbind spu context from physical spu
416 * @spu: physical spu to unbind from
419 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) in spu_unbind_context() argument
423 spu_context_trace(spu_unbind_context__enter, ctx, spu); in spu_unbind_context()
427 if (spu->ctx->flags & SPU_CREATE_NOSCHED) in spu_unbind_context()
428 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); in spu_unbind_context()
432 * If ctx->gang->aff_sched_count is positive, SPU affinity is in spu_unbind_context()
439 spu_save(&ctx->csa, spu); in spu_unbind_context()
440 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); in spu_unbind_context()
442 spin_lock_irq(&spu->register_lock); in spu_unbind_context()
443 spu->timestamp = jiffies; in spu_unbind_context()
445 spu->ibox_callback = NULL; in spu_unbind_context()
446 spu->wbox_callback = NULL; in spu_unbind_context()
447 spu->stop_callback = NULL; in spu_unbind_context()
448 spu->mfc_callback = NULL; in spu_unbind_context()
449 spu->pid = 0; in spu_unbind_context()
450 spu->tgid = 0; in spu_unbind_context()
452 spu->flags = 0; in spu_unbind_context()
453 spu->ctx = NULL; in spu_unbind_context()
454 spin_unlock_irq(&spu->register_lock); in spu_unbind_context()
456 spu_associate_mm(spu, NULL); in spu_unbind_context()
459 (spu->stats.slb_flt - ctx->stats.slb_flt_base); in spu_unbind_context()
461 (spu->stats.class2_intr - ctx->stats.class2_intr_base); in spu_unbind_context()
463 /* This maps the underlying spu state to idle */ in spu_unbind_context()
465 ctx->spu = NULL; in spu_unbind_context()
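
Lines 216-217 and 459-461 show the accounting pattern used across a bind/unbind pair: at bind time the SPU's running counters (slb_flt, class2_intr) are snapshotted into *_base fields, and at unbind time only the delta accumulated while this context was bound is credited to the context. A tiny sketch of that pattern with made-up counter names:

#include <stdio.h>

/* Illustrative counters only; not the kernel's spu / spu_context stats. */
struct hw_unit  { unsigned long faults; };			/* per-SPU, keeps counting */
struct job_stat { unsigned long faults, faults_base; };		/* per-context */

static void on_bind(struct job_stat *job, const struct hw_unit *hw)
{
	/* Snapshot the hardware counter so later deltas can be attributed. */
	job->faults_base = hw->faults;
}

static void on_unbind(struct job_stat *job, const struct hw_unit *hw)
{
	/* Credit only what happened while this job was bound. */
	job->faults += hw->faults - job->faults_base;
}

int main(void)
{
	struct hw_unit hw = { .faults = 100 };
	struct job_stat job = { 0 };

	on_bind(&job, &hw);
	hw.faults = 130;	/* 30 faults occur while bound */
	on_unbind(&job, &hw);
	printf("faults charged to this context: %lu\n", job.faults);	/* 30 */
	return 0;
}
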
533 * queues the context and waits for an spu event or error. in spu_prio_wait()
553 static struct spu *spu_get_idle(struct spu_context *ctx) in spu_get_idle()
555 struct spu *spu, *aff_ref_spu; in spu_get_idle() local
569 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
570 if (spu && spu->alloc_state == SPU_FREE) in spu_get_idle()
586 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in spu_get_idle()
587 if (spu->alloc_state == SPU_FREE) in spu_get_idle()
598 spu->alloc_state = SPU_USED; in spu_get_idle()
600 spu_context_trace(spu_get_idle__found, ctx, spu); in spu_get_idle()
601 spu_init_channels(spu); in spu_get_idle()
602 return spu; in spu_get_idle()
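
When affinity does not pin the choice, spu_get_idle() walks each node's SPU list in turn and claims the first SPU whose alloc_state is SPU_FREE, marking it SPU_USED and initialising its channels. A simplified standalone sketch of that search, assuming a fixed two-node array rather than the kernel's cbe_spu_info lists:

#include <stdio.h>

#define NNODES		2
#define PER_NODE	4

/* Illustrative allocation states, echoing SPU_FREE / SPU_USED. */
enum { FREE, USED };

static int state[NNODES][PER_NODE];

/*
 * Scan all nodes, starting with the preferred one and wrapping around
 * (the wraparound is an assumption of this sketch), and claim the first
 * free unit, which is the shape of spu_get_idle()'s search.
 */
static int get_idle(int start_node, int *out_node)
{
	for (int n = 0; n < NNODES; n++) {
		int node = (start_node + n) % NNODES;

		for (int i = 0; i < PER_NODE; i++) {
			if (state[node][i] == FREE) {
				state[node][i] = USED;	/* claim it */
				*out_node = node;
				return i;
			}
		}
	}
	return -1;	/* nothing idle anywhere */
}

int main(void)
{
	int node, idx;

	state[0][0] = state[0][1] = state[0][2] = state[0][3] = USED;
	idx = get_idle(0, &node);
	printf("claimed unit %d on node %d\n", idx, node);	/* falls over to node 1 */
	return 0;
}
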
609 * Returns the freed physical spu to run the new context on.
611 static struct spu *find_victim(struct spu_context *ctx) in find_victim()
614 struct spu *spu; in find_victim() local
622 * exactly fair, but so far the whole spu scheduler tries to keep in find_victim()
634 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in find_victim()
635 struct spu_context *tmp = spu->ctx; in find_victim()
640 victim = spu->ctx; in find_victim()
664 spu = victim->spu; in find_victim()
665 if (!spu || victim->prio <= ctx->prio) { in find_victim()
677 spu_context_trace(__spu_deactivate__unload, ctx, spu); in find_victim()
681 spu_unbind_context(spu, victim); in find_victim()
685 spu->stats.invol_ctx_switch++; in find_victim()
692 return spu; in find_victim()
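
find_victim() is the fallback for a higher-priority context that found no idle SPU: it looks for the least important running context that is still less important than the newcomer (a larger prio value means less important), and gives up if no such victim exists. A small sketch of just that selection rule, with a hypothetical pick_victim() helper:

#include <stdio.h>

/* Smaller prio value means more important, as in the kernel. */
struct running { int id; int prio; };

/*
 * Pick the least important running entry that is still less important
 * than the newcomer, the selection rule used by find_victim().
 * Returns -1 when nobody may be preempted.
 */
static int pick_victim(const struct running *run, int n, int new_prio)
{
	int victim = -1;

	for (int i = 0; i < n; i++) {
		if (run[i].prio > new_prio &&
		    (victim < 0 || run[i].prio > run[victim].prio))
			victim = i;
	}
	return victim;
}

int main(void)
{
	struct running run[] = { { 0, 120 }, { 1, 100 }, { 2, 139 } };
	int v = pick_victim(run, 3, 1);		/* an RT-like newcomer at prio 1 */

	if (v >= 0)
		printf("preempt context %d (prio %d)\n", run[v].id, run[v].prio);
	return 0;
}
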
699 static void __spu_schedule(struct spu *spu, struct spu_context *ctx) in __spu_schedule() argument
701 int node = spu->node; in __spu_schedule()
707 if (spu->ctx == NULL) { in __spu_schedule()
708 spu_bind_context(spu, ctx); in __spu_schedule()
710 spu->alloc_state = SPU_USED; in __spu_schedule()
721 static void spu_schedule(struct spu *spu, struct spu_context *ctx) in spu_schedule() argument
727 __spu_schedule(spu, ctx); in spu_schedule()
732 * spu_unschedule - remove a context from a spu, and possibly release it.
733 * @spu: The SPU to unschedule from
734 * @ctx: The context currently scheduled on the SPU
735 * @free_spu: Whether to free the SPU for other contexts in spu_unschedule()
737 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
738 * SPU is made available for other contexts (i.e., may be returned by in spu_unschedule()
740 * context to this spu.
744 static void spu_unschedule(struct spu *spu, struct spu_context *ctx, in spu_unschedule() argument
747 int node = spu->node; in spu_unschedule()
752 spu->alloc_state = SPU_FREE; in spu_unschedule()
753 spu_unbind_context(spu, ctx); in spu_unschedule()
755 spu->stats.invol_ctx_switch++; in spu_unschedule()
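
The spu_unschedule() comment spells out the @free_spu choice: with it set, the SPU's alloc_state returns to SPU_FREE and the idle search may hand the SPU out again; without it, the caller is expected to bind another context to the same SPU straight away. A tiny illustration of those two paths, using stand-in types:

#include <stdio.h>

/* Illustrative only; echoes the @free_spu choice in spu_unschedule(). */
enum unit_state { UNIT_FREE, UNIT_USED };

struct unit {
	enum unit_state alloc_state;
	const char *ctx;	/* context currently bound, if any */
};

static void unschedule(struct unit *u, int free_unit)
{
	u->ctx = NULL;				/* the context is unbound either way */
	if (free_unit)
		u->alloc_state = UNIT_FREE;	/* idle search may hand it out again */
	/* else: the caller promises to bind another context to this unit next */
}

int main(void)
{
	struct unit u = { UNIT_USED, "old" };

	unschedule(&u, 1);
	printf("state after unschedule: %s\n",
	       u.alloc_state == UNIT_FREE ? "free" : "still claimed");
	return 0;
}
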
760 * spu_activate - find a free spu for a context and execute it
761 * @ctx: spu context to schedule
764 * Tries to find a free spu to run @ctx. If no free spu is available
765 * add the context to the runqueue so it gets woken up once an spu
770 struct spu *spu; in spu_activate() local
778 if (ctx->spu) in spu_activate()
785 spu = spu_get_idle(ctx); in spu_activate()
790 if (!spu && rt_prio(ctx->prio)) in spu_activate()
791 spu = find_victim(ctx); in spu_activate()
792 if (spu) { in spu_activate()
796 __spu_schedule(spu, ctx); in spu_activate()
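
spu_activate() ties the earlier pieces together: if the context is not already bound it first tries spu_get_idle(), falls back to find_victim() only for real-time priorities, and with an SPU in hand calls __spu_schedule(); otherwise the context waits on the run queue until an SPU frees up. A compact sketch of that decision flow, with hypothetical helpers standing in for the kernel functions:

#include <stdio.h>
#include <stdbool.h>

/*
 * Hypothetical helpers standing in for spu_get_idle(), find_victim(),
 * __spu_schedule() and the run-queue add; the flow is what matters here.
 */
static int try_get_idle(void)		{ return -1; }	/* pretend nothing is idle */
static int try_preempt_victim(void)	{ return  7; }	/* pretend we can take unit 7 */
static void run_on(int unit)		{ printf("running on unit %d\n", unit); }
static void enqueue_waiter(void)	{ printf("queued until a unit frees up\n"); }

/*
 * The shape of spu_activate(): idle unit first, preemption only for RT,
 * otherwise wait on the run queue.
 */
static void activate(bool is_rt)
{
	int unit = try_get_idle();

	if (unit < 0 && is_rt)
		unit = try_preempt_victim();

	if (unit >= 0)
		run_on(unit);
	else
		enqueue_waiter();
}

int main(void)
{
	activate(false);	/* normal context: has to wait */
	activate(true);		/* RT context: may preempt a victim */
	return 0;
}
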
846 struct spu *spu = ctx->spu; in __spu_deactivate() local
849 if (spu) { in __spu_deactivate()
850 new = grab_runnable_context(max_prio, spu->node); in __spu_deactivate()
852 spu_unschedule(spu, ctx, new == NULL); in __spu_deactivate()
858 spu_schedule(spu, new); in __spu_deactivate()
871 * spu_deactivate - unbind a context from its physical spu
872 * @ctx: spu context to unbind
874 * Unbind @ctx from the physical spu it is running on and schedule
875 * the highest priority context to run on the freed physical spu.
884 * spu_yield - yield a physical spu if others are waiting
885 * @ctx: spu context to yield
888 * unbind @ctx from the physical spu and schedule the highest
889 * priority context to run on the freed physical spu instead.
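
Both spu_deactivate() and spu_yield() end with the step these comments describe: the freed physical SPU is handed to the highest-priority waiting context (grab_runnable_context(), as seen at lines 850 and 923). A sketch of that selection, assuming a plain array of waiting priorities rather than the kernel's run queue:

#include <stdio.h>

#define NO_WAITER	-1

/*
 * Pick the most important waiter below "max_prio"; a smaller value means
 * more important, as in the kernel's priority numbering. Hypothetical
 * stand-in for grab_runnable_context().
 */
static int best_waiter(const int *prio, int n, int max_prio)
{
	int best = NO_WAITER;

	for (int i = 0; i < n; i++) {
		if (prio[i] < max_prio &&
		    (best == NO_WAITER || prio[i] < prio[best]))
			best = i;
	}
	return best;
}

int main(void)
{
	int waiting[] = { 130, 110, 125 };
	int w = best_waiter(waiting, 3, 140);

	if (w != NO_WAITER)
		printf("freed SPU goes to waiter %d (prio %d)\n", w, waiting[w]);
	return 0;
}
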
904 struct spu *spu = NULL; in spusched_tick() local
919 spu = ctx->spu; in spusched_tick()
921 spu_context_trace(spusched_tick__preempt, ctx, spu); in spusched_tick()
923 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
925 spu_unschedule(spu, ctx, 0); in spusched_tick()
937 spu_schedule(spu, new); in spusched_tick()
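
spusched_tick() preempts a context whose time slice has expired only when someone of equal or better priority is waiting, which is why the cutoff passed down is ctx->prio + 1; if nobody qualifies, the context keeps the SPU and gets a fresh slice. A small sketch of that tick decision, with a hypothetical waiter_below() standing in for grab_runnable_context():

#include <stdio.h>
#include <stdbool.h>

/*
 * Illustrative tick logic: expire the slice, then decide whether anyone
 * waiting at equal or better priority should take over the unit.
 */
struct running { int prio; int time_slice; };

/* Report whether a waiter with prio strictly below the cutoff exists. */
static bool waiter_below(int cutoff, int best_waiting_prio)
{
	return best_waiting_prio < cutoff;
}

static void sched_tick(struct running *cur, int best_waiting_prio)
{
	if (--cur->time_slice > 0)
		return;				/* slice not used up yet */

	if (waiter_below(cur->prio + 1, best_waiting_prio))
		printf("preempt: hand the unit to the waiter\n");
	else {
		cur->time_slice = 5;		/* nobody better: renew the slice
						   (5 is an arbitrary value here) */
		printf("no eligible waiter, keep running\n");
	}
}

int main(void)
{
	struct running cur = { .prio = 120, .time_slice = 1 };

	sched_tick(&cur, 120);	/* equal-priority waiter wins the unit */
	return 0;
}
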
990 struct spu *spu; in spusched_thread() local
1000 list_for_each_entry(spu, &cbe_spu_info[node].spus, in spusched_thread()
1002 struct spu_context *ctx = spu->ctx; in spusched_thread()
1024 struct spu *spu; in spuctx_switch_state() local
1034 spu = ctx->spu; in spuctx_switch_state()
1040 * Update the physical SPU utilization statistics. in spuctx_switch_state()
1042 if (spu) { in spuctx_switch_state()
1044 spu->stats.times[old_state] += delta; in spuctx_switch_state()
1045 spu->stats.util_state = new_state; in spuctx_switch_state()
1046 spu->stats.tstamp = curtime; in spuctx_switch_state()
1047 node = spu->node; in spuctx_switch_state()
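
spuctx_switch_state() keeps per-state time buckets: on every transition the time since the last timestamp is charged to the state being left, then the new state and a fresh timestamp are recorded, both for the context and, when one is bound, for the physical SPU. A minimal sketch of that accounting, with an illustrative three-state enum:

#include <stdio.h>

/*
 * Illustrative utilization states only; the kernel tracks more of them,
 * per context and per physical SPU.
 */
enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IDLE, UTIL_MAX };

struct util {
	enum util_state state;
	unsigned long long tstamp;		/* when we entered "state" */
	unsigned long long times[UTIL_MAX];	/* time spent per state */
};

/* Charge the elapsed time to the state being left, then switch. */
static void switch_state(struct util *u, enum util_state new_state,
			 unsigned long long now)
{
	u->times[u->state] += now - u->tstamp;
	u->state = new_state;
	u->tstamp = now;
}

int main(void)
{
	struct util u = { UTIL_IDLE, 0, { 0 } };

	switch_state(&u, UTIL_USER, 100);	/* idle for 100 time units */
	switch_state(&u, UTIL_IDLE, 250);	/* ran user code for 150 units */
	printf("user time: %llu, idle time: %llu\n",
	       u.times[UTIL_USER], u.times[UTIL_IDLE]);
	return 0;
}
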
1066 * SPU loadavg (it even seems very odd on the CPU side...), in show_spu_loadavg()
1124 struct spu *spu; in spu_sched_exit() local
1135 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) in spu_sched_exit()
1136 if (spu->alloc_state != SPU_FREE) in spu_sched_exit()
1137 spu->alloc_state = SPU_FREE; in spu_sched_exit()