1ca2ad6bdSHans Petter Selasky /*-
216732c19SHans Petter Selasky * Copyright (c) 2017-2019 Hans Petter Selasky
3ca2ad6bdSHans Petter Selasky * All rights reserved.
4ca2ad6bdSHans Petter Selasky *
5ca2ad6bdSHans Petter Selasky * Redistribution and use in source and binary forms, with or without
6ca2ad6bdSHans Petter Selasky * modification, are permitted provided that the following conditions
7ca2ad6bdSHans Petter Selasky * are met:
8ca2ad6bdSHans Petter Selasky * 1. Redistributions of source code must retain the above copyright
9ca2ad6bdSHans Petter Selasky * notice unmodified, this list of conditions, and the following
10ca2ad6bdSHans Petter Selasky * disclaimer.
11ca2ad6bdSHans Petter Selasky * 2. Redistributions in binary form must reproduce the above copyright
12ca2ad6bdSHans Petter Selasky * notice, this list of conditions and the following disclaimer in the
13ca2ad6bdSHans Petter Selasky * documentation and/or other materials provided with the distribution.
14ca2ad6bdSHans Petter Selasky *
15ca2ad6bdSHans Petter Selasky * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16ca2ad6bdSHans Petter Selasky * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17ca2ad6bdSHans Petter Selasky * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18ca2ad6bdSHans Petter Selasky * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19ca2ad6bdSHans Petter Selasky * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20ca2ad6bdSHans Petter Selasky * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21ca2ad6bdSHans Petter Selasky * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22ca2ad6bdSHans Petter Selasky * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23ca2ad6bdSHans Petter Selasky * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24ca2ad6bdSHans Petter Selasky * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25ca2ad6bdSHans Petter Selasky */
26ca2ad6bdSHans Petter Selasky
27ca2ad6bdSHans Petter Selasky #include <sys/cdefs.h>
28ca2ad6bdSHans Petter Selasky #include <linux/workqueue.h>
29ca2ad6bdSHans Petter Selasky #include <linux/wait.h>
30ca2ad6bdSHans Petter Selasky #include <linux/compat.h>
31ca2ad6bdSHans Petter Selasky #include <linux/spinlock.h>
322491b25cSEmmanuel Vadot #include <linux/rcupdate.h>
33ec25b6faSVladimir Kondratyev #include <linux/irq_work.h>
34ca2ad6bdSHans Petter Selasky
35ca2ad6bdSHans Petter Selasky #include <sys/kernel.h>
36ca2ad6bdSHans Petter Selasky
/*
 * Define all work struct states
 *
 * These values index the per-function "states" transition tables and
 * are stored atomically in the work structure's state field via
 * linux_update_state().
 */
enum {
	WORK_ST_IDLE,			/* idle - not started */
	WORK_ST_TIMER,			/* timer is being started */
	WORK_ST_TASK,			/* taskqueue is being queued */
	WORK_ST_EXEC,			/* callback is being called */
	WORK_ST_CANCEL,			/* cancel is being requested */
	WORK_ST_MAX,
};
48ca2ad6bdSHans Petter Selasky
/*
 * Define global workqueues
 */
static struct workqueue_struct *linux_system_short_wq;
static struct workqueue_struct *linux_system_long_wq;

struct workqueue_struct *system_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_power_efficient_wq;

/* Taskqueue backing the LinuxKPI irq_work interface. */
struct taskqueue *linux_irq_work_tq;

/*
 * NOTE(review): presumably the default thread count used when creating
 * the global workqueues above; creation code is outside this view -
 * confirm against the initializer.
 */
static int linux_default_wq_cpus = 4;

static void linux_delayed_work_timer_fn(void *);
66ca2ad6bdSHans Petter Selasky
/*
 * This function atomically updates the work state and returns the
 * previous state at the time of update.
 *
 * "pstate" is a WORK_ST_MAX-sized transition table mapping each
 * current state to its successor state.
 */
static uint8_t
linux_update_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	/* Snapshot the current state without locking. */
	c = v->counter;

	/*
	 * Apply the transition via compare-and-swap, retrying with the
	 * observed value whenever another thread raced us.
	 */
	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}
83ca2ad6bdSHans Petter Selasky
84ca2ad6bdSHans Petter Selasky /*
85ca2ad6bdSHans Petter Selasky * A LinuxKPI task is allowed to free itself inside the callback function
86ca2ad6bdSHans Petter Selasky * and cannot safely be referred after the callback function has
87ca2ad6bdSHans Petter Selasky * completed. This function gives the linux_work_fn() function a hint,
88ca2ad6bdSHans Petter Selasky * that the task is not going away and can have its state checked
89ca2ad6bdSHans Petter Selasky * again. Without this extra hint LinuxKPI tasks cannot be serialized
908cf0d094SGordon Bergling * across multiple worker threads.
91ca2ad6bdSHans Petter Selasky */
9243ee32f7SHans Petter Selasky static bool
linux_work_exec_unblock(struct work_struct * work)93ca2ad6bdSHans Petter Selasky linux_work_exec_unblock(struct work_struct *work)
94ca2ad6bdSHans Petter Selasky {
95ca2ad6bdSHans Petter Selasky struct workqueue_struct *wq;
96ca2ad6bdSHans Petter Selasky struct work_exec *exec;
974c8ba7d9SHans Petter Selasky bool retval = false;
98ca2ad6bdSHans Petter Selasky
99ca2ad6bdSHans Petter Selasky wq = work->work_queue;
100ca2ad6bdSHans Petter Selasky if (unlikely(wq == NULL))
101ca2ad6bdSHans Petter Selasky goto done;
102ca2ad6bdSHans Petter Selasky
103ca2ad6bdSHans Petter Selasky WQ_EXEC_LOCK(wq);
104ca2ad6bdSHans Petter Selasky TAILQ_FOREACH(exec, &wq->exec_head, entry) {
105ca2ad6bdSHans Petter Selasky if (exec->target == work) {
106ca2ad6bdSHans Petter Selasky exec->target = NULL;
1074c8ba7d9SHans Petter Selasky retval = true;
108ca2ad6bdSHans Petter Selasky break;
109ca2ad6bdSHans Petter Selasky }
110ca2ad6bdSHans Petter Selasky }
111ca2ad6bdSHans Petter Selasky WQ_EXEC_UNLOCK(wq);
112ca2ad6bdSHans Petter Selasky done:
113ca2ad6bdSHans Petter Selasky return (retval);
114ca2ad6bdSHans Petter Selasky }
115ca2ad6bdSHans Petter Selasky
116ca2ad6bdSHans Petter Selasky static void
linux_delayed_work_enqueue(struct delayed_work * dwork)117ca2ad6bdSHans Petter Selasky linux_delayed_work_enqueue(struct delayed_work *dwork)
118ca2ad6bdSHans Petter Selasky {
119ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
120ca2ad6bdSHans Petter Selasky
121ca2ad6bdSHans Petter Selasky tq = dwork->work.work_queue->taskqueue;
122ca2ad6bdSHans Petter Selasky taskqueue_enqueue(tq, &dwork->work.work_task);
123ca2ad6bdSHans Petter Selasky }
124ca2ad6bdSHans Petter Selasky
/*
 * This function queues the given work structure on the given
 * workqueue. It returns non-zero if the work was successfully
 * [re-]queued. Else the work is already pending for completion.
 */
bool
linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
    struct work_struct *work)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TASK,		/* start queuing task */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TASK,		/* queue task another time */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* start queuing task again */
	};

	/*
	 * A draining workqueue accepts no new work; report success
	 * only when the work is not pending anyway.
	 */
	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(work));

	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		/*
		 * If a worker thread is currently executing this work,
		 * unblocking it makes it loop and run the callback again;
		 * no re-enqueue is needed in that case.
		 */
		if (linux_work_exec_unblock(work) != 0)
			return (true);
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		work->work_queue = wq;
		taskqueue_enqueue(wq->taskqueue, &work->work_task);
		return (true);
	default:
		return (false);		/* already on a queue */
	}
}
159ca2ad6bdSHans Petter Selasky
160ca2ad6bdSHans Petter Selasky /*
1612491b25cSEmmanuel Vadot * Callback func for linux_queue_rcu_work
1622491b25cSEmmanuel Vadot */
1632491b25cSEmmanuel Vadot static void
rcu_work_func(struct rcu_head * rcu)1642491b25cSEmmanuel Vadot rcu_work_func(struct rcu_head *rcu)
1652491b25cSEmmanuel Vadot {
1662491b25cSEmmanuel Vadot struct rcu_work *rwork;
1672491b25cSEmmanuel Vadot
1682491b25cSEmmanuel Vadot rwork = container_of(rcu, struct rcu_work, rcu);
1692491b25cSEmmanuel Vadot linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1702491b25cSEmmanuel Vadot }
1712491b25cSEmmanuel Vadot
1722491b25cSEmmanuel Vadot /*
1732491b25cSEmmanuel Vadot * This function queue a work after a grace period
1742491b25cSEmmanuel Vadot * If the work was already pending it returns false,
1752491b25cSEmmanuel Vadot * if not it calls call_rcu and returns true.
1762491b25cSEmmanuel Vadot */
1772491b25cSEmmanuel Vadot bool
linux_queue_rcu_work(struct workqueue_struct * wq,struct rcu_work * rwork)1782491b25cSEmmanuel Vadot linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1792491b25cSEmmanuel Vadot {
1802491b25cSEmmanuel Vadot
1812491b25cSEmmanuel Vadot if (!linux_work_pending(&rwork->work)) {
1822491b25cSEmmanuel Vadot rwork->wq = wq;
1832491b25cSEmmanuel Vadot linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
1842491b25cSEmmanuel Vadot return (true);
1852491b25cSEmmanuel Vadot }
1862491b25cSEmmanuel Vadot return (false);
1872491b25cSEmmanuel Vadot }
1882491b25cSEmmanuel Vadot
1892491b25cSEmmanuel Vadot /*
1902491b25cSEmmanuel Vadot * This function waits for the last execution of a work and then
1912491b25cSEmmanuel Vadot * flush the work.
1922491b25cSEmmanuel Vadot * It returns true if the work was pending and we waited, it returns
1932491b25cSEmmanuel Vadot * false otherwise.
1942491b25cSEmmanuel Vadot */
1952491b25cSEmmanuel Vadot bool
linux_flush_rcu_work(struct rcu_work * rwork)1962491b25cSEmmanuel Vadot linux_flush_rcu_work(struct rcu_work *rwork)
1972491b25cSEmmanuel Vadot {
1982491b25cSEmmanuel Vadot
1992491b25cSEmmanuel Vadot if (linux_work_pending(&rwork->work)) {
2002491b25cSEmmanuel Vadot linux_rcu_barrier(RCU_TYPE_REGULAR);
2012491b25cSEmmanuel Vadot linux_flush_work(&rwork->work);
2022491b25cSEmmanuel Vadot return (true);
2032491b25cSEmmanuel Vadot }
2042491b25cSEmmanuel Vadot return (linux_flush_work(&rwork->work));
2052491b25cSEmmanuel Vadot }
2062491b25cSEmmanuel Vadot
/*
 * This function queues the given work structure on the given
 * workqueue after a given delay in ticks. It returns true if the
 * work was successfully [re-]queued. Else the work is already pending
 * for completion.
 */
bool
linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    struct delayed_work *dwork, unsigned long delay)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_TIMER,		/* start timeout */
		[WORK_ST_CANCEL] = WORK_ST_TIMER,	/* start timeout */
	};
	bool res;

	/* A draining workqueue accepts no new work. */
	if (atomic_read(&wq->draining) != 0)
		return (!work_pending(&dwork->work));

	/*
	 * Clamp the delay to a valid ticks value, some consumers pass
	 * MAX_SCHEDULE_TIMEOUT.
	 */
	if (delay > INT_MAX)
		delay = INT_MAX;

	/* The timer mutex serializes against the callout handler. */
	mtx_lock(&dwork->timer.mtx);
	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_EXEC:
	case WORK_ST_CANCEL:
		/*
		 * With no delay, unblocking a currently-executing worker
		 * is enough to have the callback run again immediately.
		 */
		if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
			dwork->timer.expires = jiffies;
			res = true;
			goto out;
		}
		/* FALLTHROUGH */
	case WORK_ST_IDLE:
		dwork->work.work_queue = wq;
		dwork->timer.expires = jiffies + delay;

		if (delay == 0) {
			/* No timeout needed; enqueue directly. */
			linux_delayed_work_enqueue(dwork);
		} else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
			/* Arm the callout on the requested CPU. */
			callout_reset_on(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork, cpu);
		} else {
			callout_reset(&dwork->timer.callout, delay,
			    &linux_delayed_work_timer_fn, dwork);
		}
		res = true;
		break;
	default:
		res = false;	/* already queued or timer pending */
		break;
	}
out:
	mtx_unlock(&dwork->timer.mtx);
	return (res);
}
269ca2ad6bdSHans Petter Selasky
/*
 * Taskqueue handler executing a LinuxKPI work structure.
 *
 * Registers a work_exec entry on the workqueue so that concurrent
 * re-queues of the same work (see linux_work_exec_unblock()) can
 * request another pass of the callback instead of re-enqueueing, and
 * loops until no such request is outstanding.
 */
void
linux_work_fn(void *context, int pending)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_EXEC,		/* delayed work w/o timeout */
		[WORK_ST_TASK] = WORK_ST_EXEC,		/* call callback */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* complete callback */
		[WORK_ST_CANCEL] = WORK_ST_EXEC,	/* failed to cancel */
	};
	struct work_struct *work;
	struct workqueue_struct *wq;
	struct work_exec exec;
	struct task_struct *task;

	task = current;

	/* setup local variables */
	work = context;
	wq = work->work_queue;

	/* store target pointer */
	exec.target = work;

	/* insert executor into list */
	WQ_EXEC_LOCK(wq);
	TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
	while (1) {
		switch (linux_update_state(&work->state, states)) {
		case WORK_ST_TIMER:
		case WORK_ST_TASK:
		case WORK_ST_CANCEL:
			WQ_EXEC_UNLOCK(wq);

			/* set current work structure */
			task->work = work;

			/*
			 * Call the work function. The work may free
			 * itself here, so it must not be dereferenced
			 * afterwards unless exec.target was cleared.
			 */
			work->func(work);

			/* clear current work structure */
			task->work = NULL;

			WQ_EXEC_LOCK(wq);
			/* check if unblocked */
			if (exec.target != work) {
				/* reapply block and run another pass */
				exec.target = work;
				break;
			}
			/* FALLTHROUGH */
		default:
			goto done;
		}
	}
done:
	/* remove executor from list */
	TAILQ_REMOVE(&wq->exec_head, &exec, entry);
	WQ_EXEC_UNLOCK(wq);
}
330ca2ad6bdSHans Petter Selasky
/*
 * Taskqueue handler for delayed work; drains the associated timer
 * before running the work callback.
 */
void
linux_delayed_work_fn(void *context, int pending)
{
	struct delayed_work *dwork = context;

	/*
	 * Make sure the timer belonging to the delayed work gets
	 * drained before invoking the work function. Else the timer
	 * mutex may still be in use which can lead to use-after-free
	 * situations, because the work function might free the work
	 * structure before returning.
	 */
	callout_drain(&dwork->timer.callout);

	linux_work_fn(&dwork->work, pending);
}
34787a567f1SHans Petter Selasky
/*
 * Callout handler for delayed work: when the timeout fires, move the
 * work from the timer state to the taskqueue.
 */
static void
linux_delayed_work_timer_fn(void *arg)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TASK,		/* start queueing task */
		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* failed to cancel */
	};
	struct delayed_work *dwork = arg;

	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		linux_delayed_work_enqueue(dwork);
		break;
	default:
		break;
	}
}
369ca2ad6bdSHans Petter Selasky
370ca2ad6bdSHans Petter Selasky /*
3711b2f43a7SVladimir Kondratyev * This function cancels the given work structure in a
3721b2f43a7SVladimir Kondratyev * non-blocking fashion. It returns non-zero if the work was
3731b2f43a7SVladimir Kondratyev * successfully cancelled. Else the work may still be busy or already
3741b2f43a7SVladimir Kondratyev * cancelled.
3751b2f43a7SVladimir Kondratyev */
3761b2f43a7SVladimir Kondratyev bool
linux_cancel_work(struct work_struct * work)3771b2f43a7SVladimir Kondratyev linux_cancel_work(struct work_struct *work)
3781b2f43a7SVladimir Kondratyev {
3791b2f43a7SVladimir Kondratyev static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
3801b2f43a7SVladimir Kondratyev [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
3811b2f43a7SVladimir Kondratyev [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */
3821b2f43a7SVladimir Kondratyev [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel */
3831b2f43a7SVladimir Kondratyev [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */
3841b2f43a7SVladimir Kondratyev [WORK_ST_CANCEL] = WORK_ST_IDLE, /* can't happen */
3851b2f43a7SVladimir Kondratyev };
3861b2f43a7SVladimir Kondratyev struct taskqueue *tq;
3871b2f43a7SVladimir Kondratyev
3881b2f43a7SVladimir Kondratyev MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
3891b2f43a7SVladimir Kondratyev MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);
3901b2f43a7SVladimir Kondratyev
3911b2f43a7SVladimir Kondratyev switch (linux_update_state(&work->state, states)) {
3921b2f43a7SVladimir Kondratyev case WORK_ST_TASK:
3931b2f43a7SVladimir Kondratyev tq = work->work_queue->taskqueue;
3941b2f43a7SVladimir Kondratyev if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
3951b2f43a7SVladimir Kondratyev return (true);
3961b2f43a7SVladimir Kondratyev /* FALLTHROUGH */
3971b2f43a7SVladimir Kondratyev default:
3981b2f43a7SVladimir Kondratyev return (false);
3991b2f43a7SVladimir Kondratyev }
4001b2f43a7SVladimir Kondratyev }
4011b2f43a7SVladimir Kondratyev
/*
 * This function cancels the given work structure in a synchronous
 * fashion. It returns non-zero if the work was successfully
 * cancelled. Else the work was already cancelled.
 *
 * May sleep; keeps retrying because the work callback is allowed to
 * re-queue the work from within itself.
 */
bool
linux_cancel_work_sync(struct work_struct *work)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	bool retval = false;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_work_sync() might sleep");
retry:
	switch (linux_update_state(&work->state, states)) {
	case WORK_ST_IDLE:
	case WORK_ST_TIMER:
		return (retval);
	case WORK_ST_EXEC:
		/* Already executing; just wait for it to finish. */
		tq = work->work_queue->taskqueue;
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		goto retry;	/* work may have restarted itself */
	default:
		/* Cancel (or drain if already running) and try again. */
		tq = work->work_queue->taskqueue;
		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
			taskqueue_drain(tq, &work->work_task);
		retval = true;
		goto retry;
	}
}
440ca2ad6bdSHans Petter Selasky
/*
 * This function atomically stops the timer and callback. The timer
 * callback will not be called after this function returns. This
 * function returns true when the timeout was cancelled. Else the
 * timeout was not started or has already been called.
 *
 * When "drain" is true the function additionally waits for a
 * callout handler that is already in progress to complete.
 */
static inline bool
linux_cancel_timer(struct delayed_work *dwork, bool drain)
{
	bool cancelled;

	mtx_lock(&dwork->timer.mtx);
	/* callout_stop() returns 1 when a pending callout was removed. */
	cancelled = (callout_stop(&dwork->timer.callout) == 1);
	mtx_unlock(&dwork->timer.mtx);

	/* check if we should drain */
	if (drain)
		callout_drain(&dwork->timer.callout);
	return (cancelled);
}
461ca2ad6bdSHans Petter Selasky
/*
 * This function cancels the given delayed work structure in a
 * non-blocking fashion. It returns non-zero if the work was
 * successfully cancelled. Else the work may still be busy or already
 * cancelled.
 */
bool
linux_cancel_delayed_work(struct delayed_work *dwork)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* try to cancel */
		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
	};
	struct taskqueue *tq;
	bool cancelled;

	/* The timer mutex serializes against the callout handler. */
	mtx_lock(&dwork->timer.mtx);
	switch (linux_update_state(&dwork->work.state, states)) {
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		cancelled = (callout_stop(&dwork->timer.callout) == 1);
		if (cancelled) {
			/* Roll the state back from CANCEL to IDLE. */
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	case WORK_ST_TASK:
		tq = dwork->work.work_queue->taskqueue;
		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
			/* Roll the state back from CANCEL to IDLE. */
			atomic_cmpxchg(&dwork->work.state,
			    WORK_ST_CANCEL, WORK_ST_IDLE);
			mtx_unlock(&dwork->timer.mtx);
			return (true);
		}
		/* FALLTHROUGH */
	default:
		mtx_unlock(&dwork->timer.mtx);
		return (false);
	}
}
507ca2ad6bdSHans Petter Selasky
/*
 * One cancellation pass of linux_cancel_delayed_work_sync(): stop the
 * timer and/or the queued task and drain any execution in progress.
 * It returns true if the work was successfully cancelled. Else the
 * work was already cancelled. May sleep.
 */
static bool
linux_cancel_delayed_work_sync_int(struct delayed_work *dwork)
{
	/* State transition table applied atomically below. */
	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
		[WORK_ST_TIMER] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
	};
	struct taskqueue *tq;
	int ret, state;
	bool cancelled;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_cancel_delayed_work_sync() might sleep");
	mtx_lock(&dwork->timer.mtx);

	state = linux_update_state(&dwork->work.state, states);
	switch (state) {
	case WORK_ST_IDLE:
		mtx_unlock(&dwork->timer.mtx);
		return (false);
	case WORK_ST_TIMER:
	case WORK_ST_CANCEL:
		/* Stop the pending timeout, if any. */
		cancelled = (callout_stop(&dwork->timer.callout) == 1);

		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);

		/* Drain after dropping the mutex to avoid deadlock. */
		callout_drain(&dwork->timer.callout);
		taskqueue_drain(tq, &dwork->work.work_task);
		return (cancelled || (ret != 0));
	default:
		tq = dwork->work.work_queue->taskqueue;
		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
		mtx_unlock(&dwork->timer.mtx);
		if (ret != 0)
			taskqueue_drain(tq, &dwork->work.work_task);
		return (ret != 0);
	}
}
556ca2ad6bdSHans Petter Selasky
55705fe8245SKonstantin Belousov bool
linux_cancel_delayed_work_sync(struct delayed_work * dwork)55805fe8245SKonstantin Belousov linux_cancel_delayed_work_sync(struct delayed_work *dwork)
55905fe8245SKonstantin Belousov {
56005fe8245SKonstantin Belousov bool res;
56105fe8245SKonstantin Belousov
56205fe8245SKonstantin Belousov res = false;
56305fe8245SKonstantin Belousov while (linux_cancel_delayed_work_sync_int(dwork))
56405fe8245SKonstantin Belousov res = true;
56505fe8245SKonstantin Belousov return (res);
56605fe8245SKonstantin Belousov }
56705fe8245SKonstantin Belousov
568ca2ad6bdSHans Petter Selasky /*
569ca2ad6bdSHans Petter Selasky * This function waits until the given work structure is completed.
570ca2ad6bdSHans Petter Selasky * It returns non-zero if the work was successfully
571ca2ad6bdSHans Petter Selasky * waited for. Else the work was not waited for.
572ca2ad6bdSHans Petter Selasky */
573ca2ad6bdSHans Petter Selasky bool
linux_flush_work(struct work_struct * work)574ca2ad6bdSHans Petter Selasky linux_flush_work(struct work_struct *work)
575ca2ad6bdSHans Petter Selasky {
576ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
5774c8ba7d9SHans Petter Selasky bool retval;
578ca2ad6bdSHans Petter Selasky
579ca2ad6bdSHans Petter Selasky WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
580ca2ad6bdSHans Petter Selasky "linux_flush_work() might sleep");
581ca2ad6bdSHans Petter Selasky
582ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
583ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
5844c8ba7d9SHans Petter Selasky return (false);
585ca2ad6bdSHans Petter Selasky default:
586ca2ad6bdSHans Petter Selasky tq = work->work_queue->taskqueue;
587b44247b1SHans Petter Selasky retval = taskqueue_poll_is_busy(tq, &work->work_task);
588ca2ad6bdSHans Petter Selasky taskqueue_drain(tq, &work->work_task);
589b44247b1SHans Petter Selasky return (retval);
590ca2ad6bdSHans Petter Selasky }
591ca2ad6bdSHans Petter Selasky }
592ca2ad6bdSHans Petter Selasky
593ca2ad6bdSHans Petter Selasky /*
594ca2ad6bdSHans Petter Selasky * This function waits until the given delayed work structure is
595ca2ad6bdSHans Petter Selasky * completed. It returns non-zero if the work was successfully waited
596ca2ad6bdSHans Petter Selasky * for. Else the work was not waited for.
597ca2ad6bdSHans Petter Selasky */
598ca2ad6bdSHans Petter Selasky bool
linux_flush_delayed_work(struct delayed_work * dwork)599ca2ad6bdSHans Petter Selasky linux_flush_delayed_work(struct delayed_work *dwork)
600ca2ad6bdSHans Petter Selasky {
601ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
6024c8ba7d9SHans Petter Selasky bool retval;
603ca2ad6bdSHans Petter Selasky
604ca2ad6bdSHans Petter Selasky WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
605ca2ad6bdSHans Petter Selasky "linux_flush_delayed_work() might sleep");
606ca2ad6bdSHans Petter Selasky
607ca2ad6bdSHans Petter Selasky switch (atomic_read(&dwork->work.state)) {
608ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
6094c8ba7d9SHans Petter Selasky return (false);
610ca2ad6bdSHans Petter Selasky case WORK_ST_TIMER:
611ca2ad6bdSHans Petter Selasky if (linux_cancel_timer(dwork, 1))
612ca2ad6bdSHans Petter Selasky linux_delayed_work_enqueue(dwork);
613ca2ad6bdSHans Petter Selasky /* FALLTHROUGH */
614ca2ad6bdSHans Petter Selasky default:
615ca2ad6bdSHans Petter Selasky tq = dwork->work.work_queue->taskqueue;
616b44247b1SHans Petter Selasky retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
617ca2ad6bdSHans Petter Selasky taskqueue_drain(tq, &dwork->work.work_task);
618b44247b1SHans Petter Selasky return (retval);
619ca2ad6bdSHans Petter Selasky }
620ca2ad6bdSHans Petter Selasky }
621ca2ad6bdSHans Petter Selasky
622ca2ad6bdSHans Petter Selasky /*
623ca2ad6bdSHans Petter Selasky * This function returns true if the given work is pending, and not
624ca2ad6bdSHans Petter Selasky * yet executing:
625ca2ad6bdSHans Petter Selasky */
626ca2ad6bdSHans Petter Selasky bool
linux_work_pending(struct work_struct * work)627ca2ad6bdSHans Petter Selasky linux_work_pending(struct work_struct *work)
628ca2ad6bdSHans Petter Selasky {
629ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
630ca2ad6bdSHans Petter Selasky case WORK_ST_TIMER:
631ca2ad6bdSHans Petter Selasky case WORK_ST_TASK:
6323a150601SAlexander Motin case WORK_ST_CANCEL:
6334c8ba7d9SHans Petter Selasky return (true);
634ca2ad6bdSHans Petter Selasky default:
6354c8ba7d9SHans Petter Selasky return (false);
636ca2ad6bdSHans Petter Selasky }
637ca2ad6bdSHans Petter Selasky }
638ca2ad6bdSHans Petter Selasky
639ca2ad6bdSHans Petter Selasky /*
640ca2ad6bdSHans Petter Selasky * This function returns true if the given work is busy.
641ca2ad6bdSHans Petter Selasky */
642ca2ad6bdSHans Petter Selasky bool
linux_work_busy(struct work_struct * work)643ca2ad6bdSHans Petter Selasky linux_work_busy(struct work_struct *work)
644ca2ad6bdSHans Petter Selasky {
645ca2ad6bdSHans Petter Selasky struct taskqueue *tq;
646ca2ad6bdSHans Petter Selasky
647ca2ad6bdSHans Petter Selasky switch (atomic_read(&work->state)) {
648ca2ad6bdSHans Petter Selasky case WORK_ST_IDLE:
6494c8ba7d9SHans Petter Selasky return (false);
650ca2ad6bdSHans Petter Selasky case WORK_ST_EXEC:
651ca2ad6bdSHans Petter Selasky tq = work->work_queue->taskqueue;
652ca2ad6bdSHans Petter Selasky return (taskqueue_poll_is_busy(tq, &work->work_task));
653ca2ad6bdSHans Petter Selasky default:
6544c8ba7d9SHans Petter Selasky return (true);
655ca2ad6bdSHans Petter Selasky }
656ca2ad6bdSHans Petter Selasky }
657ca2ad6bdSHans Petter Selasky
/*
 * Create a workqueue backed by a taskqueue with the given number of
 * worker threads. A cpus value of zero selects the system default.
 */
struct workqueue_struct *
linux_create_workqueue_common(const char *name, int cpus)
{
	struct workqueue_struct *wq;

	/*
	 * If zero CPUs are specified use the default number of CPUs:
	 */
	if (cpus == 0)
		cpus = linux_default_wq_cpus;

	wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
	wq->taskqueue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &wq->taskqueue);
	atomic_set(&wq->draining, 0);
	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
	/*
	 * NOTE(review): exec_head/exec_mtx presumably track currently
	 * executing work items for flush/drain support — confirm
	 * against the enqueue/exec paths earlier in this file.
	 */
	TAILQ_INIT(&wq->exec_head);
	mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);

	return (wq);
}
679ca2ad6bdSHans Petter Selasky
/*
 * Drain and free the given workqueue. Must not be called while work
 * can still be queued to it.
 */
void
linux_destroy_workqueue(struct workqueue_struct *wq)
{
	/* Mark the workqueue as draining before waiting for work. */
	atomic_inc(&wq->draining);
	drain_workqueue(wq);
	/* Free the backing taskqueue only after all work has finished. */
	taskqueue_free(wq->taskqueue);
	mtx_destroy(&wq->exec_mtx);
	kfree(wq);
}
689ca2ad6bdSHans Petter Selasky
/*
 * Initialize a delayed work structure: zero it, set the user
 * callback, and set up the backing task, mutex and callout.
 */
void
linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
	memset(dwork, 0, sizeof(*dwork));
	dwork->work.func = func;
	TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
	/*
	 * NOTE(review): MTX_NOWITNESS presumably avoids false
	 * lock-order reports across the many per-dwork mutexes that
	 * share this name — confirm.
	 */
	mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
	    MTX_DEF | MTX_NOWITNESS);
	/* The callout is protected by the same per-dwork mutex. */
	callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
}
700ca2ad6bdSHans Petter Selasky
/*
 * Return the work item associated with the current thread, or NULL
 * when the thread is not executing work (value is read from the
 * LinuxKPI "current" task structure).
 */
struct work_struct *
linux_current_work(void)
{
	return (current->work);
}
706549dcdb3SHans Petter Selasky
707ca2ad6bdSHans Petter Selasky static void
linux_work_init(void * arg)708ca2ad6bdSHans Petter Selasky linux_work_init(void *arg)
709ca2ad6bdSHans Petter Selasky {
710ca2ad6bdSHans Petter Selasky int max_wq_cpus = mp_ncpus + 1;
711ca2ad6bdSHans Petter Selasky
712ca2ad6bdSHans Petter Selasky /* avoid deadlock when there are too few threads */
713ca2ad6bdSHans Petter Selasky if (max_wq_cpus < 4)
714ca2ad6bdSHans Petter Selasky max_wq_cpus = 4;
715ca2ad6bdSHans Petter Selasky
7167a742c41SHans Petter Selasky /* set default number of CPUs */
7177a742c41SHans Petter Selasky linux_default_wq_cpus = max_wq_cpus;
7187a742c41SHans Petter Selasky
719ca2ad6bdSHans Petter Selasky linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
720ca2ad6bdSHans Petter Selasky linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);
721ca2ad6bdSHans Petter Selasky
722ca2ad6bdSHans Petter Selasky /* populate the workqueue pointers */
723ca2ad6bdSHans Petter Selasky system_long_wq = linux_system_long_wq;
724ca2ad6bdSHans Petter Selasky system_wq = linux_system_short_wq;
725ca2ad6bdSHans Petter Selasky system_power_efficient_wq = linux_system_short_wq;
726ca2ad6bdSHans Petter Selasky system_unbound_wq = linux_system_short_wq;
7277a13eebaSHans Petter Selasky system_highpri_wq = linux_system_short_wq;
728ca2ad6bdSHans Petter Selasky }
7299657edd7SConrad Meyer SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);
730ca2ad6bdSHans Petter Selasky
731ca2ad6bdSHans Petter Selasky static void
linux_work_uninit(void * arg)732ca2ad6bdSHans Petter Selasky linux_work_uninit(void *arg)
733ca2ad6bdSHans Petter Selasky {
734ca2ad6bdSHans Petter Selasky destroy_workqueue(linux_system_short_wq);
735ca2ad6bdSHans Petter Selasky destroy_workqueue(linux_system_long_wq);
736ca2ad6bdSHans Petter Selasky
737ca2ad6bdSHans Petter Selasky /* clear workqueue pointers */
738ca2ad6bdSHans Petter Selasky system_long_wq = NULL;
739ca2ad6bdSHans Petter Selasky system_wq = NULL;
740ca2ad6bdSHans Petter Selasky system_power_efficient_wq = NULL;
741ca2ad6bdSHans Petter Selasky system_unbound_wq = NULL;
7427a13eebaSHans Petter Selasky system_highpri_wq = NULL;
743ca2ad6bdSHans Petter Selasky }
7449657edd7SConrad Meyer SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);
745ec25b6faSVladimir Kondratyev
746ec25b6faSVladimir Kondratyev void
linux_irq_work_fn(void * context,int pending)747ec25b6faSVladimir Kondratyev linux_irq_work_fn(void *context, int pending)
748ec25b6faSVladimir Kondratyev {
749ec25b6faSVladimir Kondratyev struct irq_work *irqw = context;
750ec25b6faSVladimir Kondratyev
751ec25b6faSVladimir Kondratyev irqw->func(irqw);
752ec25b6faSVladimir Kondratyev }
753ec25b6faSVladimir Kondratyev
static void
linux_irq_work_init_fn(void *context, int pending)
{
	/*
	 * LinuxKPI performs lazy allocation of memory structures required by
	 * current on the first access to it. As some irq_work clients read
	 * it with spinlock taken, we have to preallocate td_lkpi_task before
	 * first call to irq_work_queue(). As irq_work uses a single thread,
	 * it is enough to read current once at SYSINIT stage.
	 */
	if (current == NULL)
		panic("irq_work taskqueue is not initialized");
}
/* One-shot task enqueued at SYSINIT time to warm up "current" above. */
static struct task linux_irq_work_init_task =
    TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);
769ec25b6faSVladimir Kondratyev
/*
 * Create the single-threaded taskqueue backing the irq_work API and
 * prime its worker thread's "current" structure.
 */
static void
linux_irq_work_init(void *arg)
{
	linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
	    M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
	taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
	    "linuxkpi_irq_wq");
	/* Preallocate "current" for the worker (see linux_irq_work_init_fn). */
	taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
}
SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_init, NULL);
781ec25b6faSVladimir Kondratyev
/*
 * Drain any outstanding irq_work items, then free the backing
 * taskqueue.
 */
static void
linux_irq_work_uninit(void *arg)
{
	taskqueue_drain_all(linux_irq_work_tq);
	taskqueue_free(linux_irq_work_tq);
}
SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_uninit, NULL);
790