Lines Matching refs:work
57 static bool irq_work_claim(struct irq_work *work)
61 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
63 * If the work is already pending, no need to raise the IPI.
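The three matches above fall inside irq_work_claim(); the listing as a whole is consistent with kernel/irq_work.c in a recent mainline tree. Reconstructed around the matched lines (a sketch, so comments and details may differ between kernel versions), the claim path looks roughly like this:

static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        /* Atomically mark the item claimed (PENDING | BUSY) and tag it for the SMP queueing code. */
        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK,
                                 &work->node.a_flags);
        /*
         * If the work is already pending, no need to raise the IPI;
         * the earlier claim guarantees the callback will still run.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}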
79 static __always_inline void irq_work_raise(struct irq_work *work)
82 trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
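irq_work_raise() is tiny: it traces which callback the self-interrupt is being raised for, then asks the architecture to trigger it. A reconstruction consistent with the matched trace line (the trace_ipi_send_cpu_enabled() guard is an assumption based on recent mainline):

static __always_inline void irq_work_raise(struct irq_work *work)
{
        /* Record which callback the self-IPI is sent for, if anyone is tracing. */
        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

        /* Let the architecture raise the irq_work interrupt/IPI. */
        arch_irq_work_raise();
}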
87 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
88 static void __irq_work_queue_local(struct irq_work *work)
95 work_flags = atomic_read(&work->node.a_flags);
107 if (!llist_add(&work->node.llist, list))
110 /* If the work is "lazy", handle it from next tick if any */
112 irq_work_raise(work);
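Matches 87-112 belong to __irq_work_queue_local(), which chooses between the per-CPU raised_list and lazy_list and then decides whether to raise the self-interrupt. A sketch of the surrounding control flow, reconstructed to fit the fragments above (the raised_list/lazy_list names and the PREEMPT_RT handling follow recent mainline and are assumptions here):

static void __irq_work_queue_local(struct irq_work *work)
{
        struct llist_head *list;
        bool rt_lazy_work = false;
        bool lazy_work = false;
        int work_flags;

        work_flags = atomic_read(&work->node.a_flags);
        if (work_flags & IRQ_WORK_LAZY)
                lazy_work = true;
        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                 !(work_flags & IRQ_WORK_HARD_IRQ))
                rt_lazy_work = true;

        /* Lazy items (and, on PREEMPT_RT, non-HARD items) wait for the tick or the irq_work thread. */
        if (lazy_work || rt_lazy_work)
                list = this_cpu_ptr(&lazy_list);
        else
                list = this_cpu_ptr(&raised_list);

        /* The list was already non-empty, so an IPI or the tick is already on the way. */
        if (!llist_add(&work->node.llist, list))
                return;

        /* If the work is "lazy", handle it from next tick if any */
        if (!lazy_work || tick_nohz_tick_stopped())
                irq_work_raise(work);
}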
115 /* Enqueue the irq work @work on the current CPU */
116 bool irq_work_queue(struct irq_work *work)
119 if (!irq_work_claim(work))
124 __irq_work_queue_local(work);
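irq_work_queue() is the public entry point: claim the item, then queue it on the local CPU with preemption disabled. A minimal hypothetical caller (my_work and my_work_fn are invented names) that defers a printk from NMI or deep IRQ context might look like:

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Runs later in hard interrupt context on the queueing CPU; must not sleep. */
static void my_work_fn(struct irq_work *work)
{
        pr_info("irq_work callback ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work = IRQ_WORK_INIT(my_work_fn);

static void my_nmi_path(void)
{
        /*
         * Returns false if my_work was already pending; either way the
         * callback runs (at least) once after we return.
         */
        irq_work_queue(&my_work);
}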
132 * Enqueue the irq_work @work on @cpu unless it's already pending
137 bool irq_work_queue_on(struct irq_work *work, int cpu)
140 return irq_work_queue(work);
143 /* All work should have been flushed before going offline */
147 if (!irq_work_claim(work))
150 kasan_record_aux_stack(work);
159 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
163 !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
165 if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
168 work = &per_cpu(irq_work_wakeup, cpu);
169 if (!irq_work_claim(work))
173 __smp_call_single_queue(cpu, &work->node.llist);
175 __irq_work_queue_local(work);
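irq_work_queue_on() adds the cross-CPU case: without CONFIG_SMP it simply falls back to irq_work_queue(), and on PREEMPT_RT a non-HARD item is parked on the remote lazy_list while the per-CPU irq_work_wakeup item (claimed at match 169) is what actually goes through __smp_call_single_queue() to wake that CPU's irq_work thread. A hypothetical remote-kick sketch, reusing my_work_fn from the earlier example (names invented; the target CPU must be online):

#include <linux/cpumask.h>
#include <linux/irq_work.h>

/* HARD keeps the callback in hard interrupt context even on PREEMPT_RT. */
static struct irq_work my_remote_work = IRQ_WORK_INIT_HARD(my_work_fn);

static void my_kick_cpu(int target_cpu)
{
        if (cpu_online(target_cpu))
                irq_work_queue_on(&my_remote_work, target_cpu);
}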
195 /* All work should have been flushed before going offline */
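Judging by its line number, this match most likely sits in irq_work_needs_cpu(), which the tick/nohz code uses to check whether pending irq_work must keep the tick alive; the attribution is inferred, so treat this reconstruction as a sketch only:

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        /*
         * A raised list is handled by the arch irq_work interrupt when one
         * exists, so only the lazy list (or a raised list without that
         * interrupt) needs the tick.
         */
        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}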
203 struct irq_work *work = arg;
207 * Clear the PENDING bit, after this point the @work can be re-used.
211 flags = atomic_read(&work->node.a_flags);
213 atomic_set(&work->node.a_flags, flags);
221 work->func(work);
228 (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
230 if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
232 rcuwait_wake_up(&work->irqwait);
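Matches 203-232 are irq_work_single(), the per-item execution path: PENDING is dropped (BUSY stays set) before the callback runs, and afterwards BUSY is cleared only if nobody re-claimed the item in the meantime. A reconstruction consistent with the matched lines (the lockdep annotations follow recent mainline and are an assumption here):

void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work can be re-used;
         * BUSY stays set while the callback runs.
         */
        flags = atomic_read(&work->node.a_flags);
        flags &= ~IRQ_WORK_PENDING;
        atomic_set(&work->node.a_flags, flags);

        /* Pairs with the atomic_fetch_or() in irq_work_claim(). */
        smp_mb();

        lockdep_irq_work_enter(flags);
        work->func(work);
        lockdep_irq_work_exit(flags);

        /* Drop BUSY unless the item was claimed again while the callback ran. */
        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

        /* Waiters in irq_work_sync() sleep on irqwait in these configurations. */
        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt())
                rcuwait_wake_up(&work->irqwait);
}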
237 struct irq_work *work, *tmp;
241 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
251 llist_for_each_entry_safe(work, tmp, llnode, node.llist)
252 irq_work_single(work);
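These matches come from irq_work_run_list(): the whole per-CPU llist is detached in one atomic llist_del_all() and each claimed item is then handed to irq_work_single(). Reconstructed around the matched iterator:

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        /*
         * On PREEMPT_RT, IRQ-work not marked HARD runs from a per-CPU
         * thread in preemptible context; everything else runs with
         * interrupts disabled.
         */
        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
                irq_work_single(work);
}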
286 void irq_work_sync(struct irq_work *work)
291 if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
293 rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
298 while (irq_work_is_busy(work))
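irq_work_sync() waits for a previously queued item to finish: where the wakeup path above applies (PREEMPT_RT non-HARD items, or architectures without an irq_work interrupt) it sleeps on work->irqwait, otherwise it spins on irq_work_is_busy(). A hypothetical teardown fragment for the my_work item used earlier (name invented):

static void my_driver_exit(void)
{
        /*
         * Process context only, interrupts enabled; this may sleep.
         * After it returns the callback is no longer running anywhere.
         */
        irq_work_sync(&my_work);
        /* Now the memory holding my_work may be freed or reused. */
}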