xref: /freebsd/sys/compat/linuxkpi/common/src/linux_work.c (revision 1b2f43a7427ebf51561867f6c497833268014512)
1  /*-
2   * Copyright (c) 2017-2019 Hans Petter Selasky
3   * All rights reserved.
4   *
5   * Redistribution and use in source and binary forms, with or without
6   * modification, are permitted provided that the following conditions
7   * are met:
8   * 1. Redistributions of source code must retain the above copyright
9   *    notice unmodified, this list of conditions, and the following
10   *    disclaimer.
11   * 2. Redistributions in binary form must reproduce the above copyright
12   *    notice, this list of conditions and the following disclaimer in the
13   *    documentation and/or other materials provided with the distribution.
14   *
15   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25   */
26  
27  #include <sys/cdefs.h>
28  #include <linux/workqueue.h>
29  #include <linux/wait.h>
30  #include <linux/compat.h>
31  #include <linux/spinlock.h>
32  #include <linux/rcupdate.h>
33  #include <linux/irq_work.h>
34  
35  #include <sys/kernel.h>
36  
37  /*
38   * Define all work struct states
39   */
40  enum {
41  	WORK_ST_IDLE,			/* idle - not started */
42  	WORK_ST_TIMER,			/* timer is being started */
43  	WORK_ST_TASK,			/* taskqueue is being queued */
44  	WORK_ST_EXEC,			/* callback is being called */
45  	WORK_ST_CANCEL,			/* cancel is being requested */
46  	WORK_ST_MAX,
47  };
48  
49  /*
50   * Define global workqueues
51   */
52  static struct workqueue_struct *linux_system_short_wq;
53  static struct workqueue_struct *linux_system_long_wq;
54  
55  struct workqueue_struct *system_wq;
56  struct workqueue_struct *system_long_wq;
57  struct workqueue_struct *system_unbound_wq;
58  struct workqueue_struct *system_highpri_wq;
59  struct workqueue_struct *system_power_efficient_wq;
60  
61  struct taskqueue *linux_irq_work_tq;
62  
63  static int linux_default_wq_cpus = 4;
64  
65  static void linux_delayed_work_timer_fn(void *);
66  
67  /*
68   * This function atomically updates the work state and returns the
69   * previous state at the time of update.
70   */
71  static uint8_t
72  linux_update_state(atomic_t *v, const uint8_t *pstate)
73  {
74  	int c, old;
75  
76  	c = v->counter;
77  
78  	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
79  		c = old;
80  
81  	return (c);
82  }
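
/*
 * Illustrative sketch (example only, not built): how the per-function
 * "states" tables below are meant to be read.  Each table maps the
 * previous state (the array index) to the next state, and
 * linux_update_state() applies that mapping atomically, returning the
 * previous state so the caller can branch on it.  The table and
 * function names here are hypothetical.
 */
#if 0	/* example only */
static const uint8_t example_states[WORK_ST_MAX] __aligned(8) = {
	[WORK_ST_IDLE] = WORK_ST_TASK,		/* idle work becomes queued */
	[WORK_ST_TIMER] = WORK_ST_TIMER,	/* all other states unchanged */
	[WORK_ST_TASK] = WORK_ST_TASK,
	[WORK_ST_EXEC] = WORK_ST_EXEC,
	[WORK_ST_CANCEL] = WORK_ST_CANCEL,
};

static void
example_transition(struct work_struct *work)
{
	switch (linux_update_state(&work->state, example_states)) {
	case WORK_ST_IDLE:
		/* this caller won the IDLE -> TASK transition */
		break;
	default:
		/* the work was already in some other state */
		break;
	}
}
#endif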
83  
84  /*
85   * A LinuxKPI task is allowed to free itself inside the callback function
86   * and cannot safely be referenced after the callback function has
87   * completed. This function gives linux_work_fn() a hint that the
88   * task is not going away and can have its state checked again.
89   * Without this extra hint LinuxKPI tasks cannot be serialized
90   * across multiple worker threads.
91   */
92  static bool
93  linux_work_exec_unblock(struct work_struct *work)
94  {
95  	struct workqueue_struct *wq;
96  	struct work_exec *exec;
97  	bool retval = false;
98  
99  	wq = work->work_queue;
100  	if (unlikely(wq == NULL))
101  		goto done;
102  
103  	WQ_EXEC_LOCK(wq);
104  	TAILQ_FOREACH(exec, &wq->exec_head, entry) {
105  		if (exec->target == work) {
106  			exec->target = NULL;
107  			retval = true;
108  			break;
109  		}
110  	}
111  	WQ_EXEC_UNLOCK(wq);
112  done:
113  	return (retval);
114  }
115  
116  static void
117  linux_delayed_work_enqueue(struct delayed_work *dwork)
118  {
119  	struct taskqueue *tq;
120  
121  	tq = dwork->work.work_queue->taskqueue;
122  	taskqueue_enqueue(tq, &dwork->work.work_task);
123  }
124  
125  /*
126   * This function queues the given work structure on the given
127   * workqueue. It returns true if the work was successfully
128   * [re-]queued. Otherwise the work is already pending completion.
129   */
130  bool
131  linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
132      struct work_struct *work)
133  {
134  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
135  		[WORK_ST_IDLE] = WORK_ST_TASK,		/* start queuing task */
136  		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
137  		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
138  		[WORK_ST_EXEC] = WORK_ST_TASK,		/* queue task another time */
139  		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* start queuing task again */
140  	};
141  
142  	if (atomic_read(&wq->draining) != 0)
143  		return (!work_pending(work));
144  
145  	switch (linux_update_state(&work->state, states)) {
146  	case WORK_ST_EXEC:
147  	case WORK_ST_CANCEL:
148  		if (linux_work_exec_unblock(work) != 0)
149  			return (true);
150  		/* FALLTHROUGH */
151  	case WORK_ST_IDLE:
152  		work->work_queue = wq;
153  		taskqueue_enqueue(wq->taskqueue, &work->work_task);
154  		return (true);
155  	default:
156  		return (false);		/* already on a queue */
157  	}
158  }
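
/*
 * Illustrative example (hypothetical driver code, not built): a
 * LinuxKPI consumer typically reaches linux_queue_work_on() through
 * the Linux-style INIT_WORK()/schedule_work()/queue_work() wrappers
 * from <linux/workqueue.h>.  The "example_*" names are made up.
 */
#if 0	/* example only */
struct example_softc {
	struct work_struct reset_work;
};

static void
example_reset_fn(struct work_struct *work)
{
	struct example_softc *sc =
	    container_of(work, struct example_softc, reset_work);

	/* perform the deferred reset using "sc" */
	(void)sc;
}

static void
example_attach(struct example_softc *sc)
{
	INIT_WORK(&sc->reset_work, example_reset_fn);
}

static void
example_interrupt(struct example_softc *sc)
{
	/* queue on the shared system_wq; false means already queued */
	schedule_work(&sc->reset_work);
}
#endif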
159  
160  /*
161   * Callback func for linux_queue_rcu_work
162   */
163  static void
164  rcu_work_func(struct rcu_head *rcu)
165  {
166  	struct rcu_work *rwork;
167  
168  	rwork = container_of(rcu, struct rcu_work, rcu);
169  	linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
170  }
171  
172  /*
173   * This function queues a work item after an RCU grace period.
174   * If the work was already pending it returns false; otherwise it
175   * calls call_rcu() and returns true.
176   */
177  bool
178  linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
179  {
180  
181  	if (!linux_work_pending(&rwork->work)) {
182  		rwork->wq = wq;
183  		linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
184  		return (true);
185  	}
186  	return (false);
187  }
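
/*
 * Illustrative example (hypothetical, not built): deferring a free
 * until after an RCU grace period, assuming the Linux-style
 * INIT_RCU_WORK()/queue_rcu_work()/to_rcu_work() helpers are
 * available in <linux/workqueue.h>.
 */
#if 0	/* example only */
struct example_obj {
	struct rcu_work free_rwork;
};

static void
example_free_fn(struct work_struct *work)
{
	struct example_obj *obj = container_of(to_rcu_work(work),
	    struct example_obj, free_rwork);

	kfree(obj);
}

static void
example_retire(struct example_obj *obj)
{
	INIT_RCU_WORK(&obj->free_rwork, example_free_fn);
	queue_rcu_work(system_wq, &obj->free_rwork);
}
#endif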
188  
189  /*
190   * This function waits for the last execution of a work item and
191   * then flushes the work.
192   * It returns true if the work was pending and we waited for it;
193   * otherwise it returns false.
194   */
195  bool
196  linux_flush_rcu_work(struct rcu_work *rwork)
197  {
198  
199  	if (linux_work_pending(&rwork->work)) {
200  		linux_rcu_barrier(RCU_TYPE_REGULAR);
201  		linux_flush_work(&rwork->work);
202  		return (true);
203  	}
204  	return (linux_flush_work(&rwork->work));
205  }
206  
207  /*
208   * This function queues the given work structure on the given
209   * workqueue after a given delay in ticks. It returns true if the
210   * work was successfully [re-]queued. Otherwise the work is already
211   * pending completion.
212   */
213  bool
214  linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
215      struct delayed_work *dwork, unsigned delay)
216  {
217  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
218  		[WORK_ST_IDLE] = WORK_ST_TIMER,		/* start timeout */
219  		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* NOP */
220  		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
221  		[WORK_ST_EXEC] = WORK_ST_TIMER,		/* start timeout */
222  		[WORK_ST_CANCEL] = WORK_ST_TIMER,	/* start timeout */
223  	};
224  	bool res;
225  
226  	if (atomic_read(&wq->draining) != 0)
227  		return (!work_pending(&dwork->work));
228  
229  	mtx_lock(&dwork->timer.mtx);
230  	switch (linux_update_state(&dwork->work.state, states)) {
231  	case WORK_ST_EXEC:
232  	case WORK_ST_CANCEL:
233  		if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
234  			dwork->timer.expires = jiffies;
235  			res = true;
236  			goto out;
237  		}
238  		/* FALLTHROUGH */
239  	case WORK_ST_IDLE:
240  		dwork->work.work_queue = wq;
241  		dwork->timer.expires = jiffies + delay;
242  
243  		if (delay == 0) {
244  			linux_delayed_work_enqueue(dwork);
245  		} else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
246  			callout_reset_on(&dwork->timer.callout, delay,
247  			    &linux_delayed_work_timer_fn, dwork, cpu);
248  		} else {
249  			callout_reset(&dwork->timer.callout, delay,
250  			    &linux_delayed_work_timer_fn, dwork);
251  		}
252  		res = true;
253  		break;
254  	default:
255  		res = false;
256  		break;
257  	}
258  out:
259  	mtx_unlock(&dwork->timer.mtx);
260  	return (res);
261  }
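
/*
 * Illustrative example (hypothetical, not built): arming a delayed
 * work item.  The Linux-style INIT_DELAYED_WORK()/
 * schedule_delayed_work() wrappers funnel into
 * linux_queue_delayed_work_on() with the delay given in ticks.
 */
#if 0	/* example only */
struct example_poll_softc {
	struct delayed_work poll_work;
};

static void
example_poll_fn(struct work_struct *work)
{
	struct example_poll_softc *sc =
	    container_of(work, struct example_poll_softc, poll_work.work);

	/* poll the hardware, then re-arm for one second from now */
	schedule_delayed_work(&sc->poll_work, msecs_to_jiffies(1000));
}

static void
example_start_polling(struct example_poll_softc *sc)
{
	INIT_DELAYED_WORK(&sc->poll_work, example_poll_fn);
	schedule_delayed_work(&sc->poll_work, msecs_to_jiffies(1000));
}
#endif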
262  
263  void
264  linux_work_fn(void *context, int pending)
265  {
266  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
267  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
268  		[WORK_ST_TIMER] = WORK_ST_EXEC,		/* delayed work w/o timeout */
269  		[WORK_ST_TASK] = WORK_ST_EXEC,		/* call callback */
270  		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* complete callback */
271  		[WORK_ST_CANCEL] = WORK_ST_EXEC,	/* failed to cancel */
272  	};
273  	struct work_struct *work;
274  	struct workqueue_struct *wq;
275  	struct work_exec exec;
276  	struct task_struct *task;
277  
278  	task = current;
279  
280  	/* setup local variables */
281  	work = context;
282  	wq = work->work_queue;
283  
284  	/* store target pointer */
285  	exec.target = work;
286  
287  	/* insert executor into list */
288  	WQ_EXEC_LOCK(wq);
289  	TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
290  	while (1) {
291  		switch (linux_update_state(&work->state, states)) {
292  		case WORK_ST_TIMER:
293  		case WORK_ST_TASK:
294  		case WORK_ST_CANCEL:
295  			WQ_EXEC_UNLOCK(wq);
296  
297  			/* set current work structure */
298  			task->work = work;
299  
300  			/* call work function */
301  			work->func(work);
302  
303  			/* clear current work structure */
304  			task->work = NULL;
305  
306  			WQ_EXEC_LOCK(wq);
307  			/* check if unblocked */
308  			if (exec.target != work) {
309  				/* reapply block */
310  				exec.target = work;
311  				break;
312  			}
313  			/* FALLTHROUGH */
314  		default:
315  			goto done;
316  		}
317  	}
318  done:
319  	/* remove executor from list */
320  	TAILQ_REMOVE(&wq->exec_head, &exec, entry);
321  	WQ_EXEC_UNLOCK(wq);
322  }
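
/*
 * Illustrative example (hypothetical, not built): the executor loop
 * above is what lets a work item safely requeue itself from its own
 * callback.  linux_queue_work_on() clears "exec.target" through
 * linux_work_exec_unblock(), and the loop then runs the callback again
 * on the same worker thread instead of executing it concurrently on
 * another one.
 */
#if 0	/* example only */
static void
example_requeue_fn(struct work_struct *work)
{
	/* do one unit of deferred processing ... */

	/* ... then ask to be run again; invocations stay serialized */
	schedule_work(work);
}
#endif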
323  
324  void
325  linux_delayed_work_fn(void *context, int pending)
326  {
327  	struct delayed_work *dwork = context;
328  
329  	/*
330  	 * Make sure the timer belonging to the delayed work gets
331  	 * drained before invoking the work function. Else the timer
332  	 * mutex may still be in use which can lead to use-after-free
333  	 * situations, because the work function might free the work
334  	 * structure before returning.
335  	 */
336  	callout_drain(&dwork->timer.callout);
337  
338  	linux_work_fn(&dwork->work, pending);
339  }
340  
341  static void
342  linux_delayed_work_timer_fn(void *arg)
343  {
344  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
345  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
346  		[WORK_ST_TIMER] = WORK_ST_TASK,		/* start queueing task */
347  		[WORK_ST_TASK] = WORK_ST_TASK,		/* NOP */
348  		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
349  		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* failed to cancel */
350  	};
351  	struct delayed_work *dwork = arg;
352  
353  	switch (linux_update_state(&dwork->work.state, states)) {
354  	case WORK_ST_TIMER:
355  	case WORK_ST_CANCEL:
356  		linux_delayed_work_enqueue(dwork);
357  		break;
358  	default:
359  		break;
360  	}
361  }
362  
363  /*
364   * This function cancels the given work structure in a
365   * non-blocking fashion. It returns true if the work was successfully
366   * cancelled. Otherwise the work may still be busy or already
367   * cancelled.
368   */
369  bool
370  linux_cancel_work(struct work_struct *work)
371  {
372  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
373  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
374  		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
375  		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel */
376  		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
377  		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* can't happen */
378  	};
379  	struct taskqueue *tq;
380  
381  	MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
382  	MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);
383  
384  	switch (linux_update_state(&work->state, states)) {
385  	case WORK_ST_TASK:
386  		tq = work->work_queue->taskqueue;
387  		if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
388  			return (true);
389  		/* FALLTHROUGH */
390  	default:
391  		return (false);
392  	}
393  }
394  
395  /*
396   * This function cancels the given work structure in a synchronous
397   * fashion. It returns true if the work was successfully
398   * cancelled. Otherwise the work was already cancelled.
399   */
400  bool
401  linux_cancel_work_sync(struct work_struct *work)
402  {
403  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
404  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
405  		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
406  		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
407  		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
408  		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
409  	};
410  	struct taskqueue *tq;
411  	bool retval = false;
412  
413  	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
414  	    "linux_cancel_work_sync() might sleep");
415  retry:
416  	switch (linux_update_state(&work->state, states)) {
417  	case WORK_ST_IDLE:
418  	case WORK_ST_TIMER:
419  		return (retval);
420  	case WORK_ST_EXEC:
421  		tq = work->work_queue->taskqueue;
422  		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
423  			taskqueue_drain(tq, &work->work_task);
424  		goto retry;	/* work may have restarted itself */
425  	default:
426  		tq = work->work_queue->taskqueue;
427  		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
428  			taskqueue_drain(tq, &work->work_task);
429  		retval = true;
430  		goto retry;
431  	}
432  }
433  
434  /*
435   * This function atomically stops the timer and callback. The timer
436   * callback will not be called after this function returns. This
437   * function returns true when the timeout was cancelled. Otherwise the
438   * timeout was not started or has already been called.
439   */
440  static inline bool
441  linux_cancel_timer(struct delayed_work *dwork, bool drain)
442  {
443  	bool cancelled;
444  
445  	mtx_lock(&dwork->timer.mtx);
446  	cancelled = (callout_stop(&dwork->timer.callout) == 1);
447  	mtx_unlock(&dwork->timer.mtx);
448  
449  	/* check if we should drain */
450  	if (drain)
451  		callout_drain(&dwork->timer.callout);
452  	return (cancelled);
453  }
454  
455  /*
456   * This function cancels the given delayed work structure in a
457   * non-blocking fashion. It returns true if the work was successfully
458   * cancelled. Otherwise the work may still be busy or already
459   * cancelled.
460   */
461  bool
462  linux_cancel_delayed_work(struct delayed_work *dwork)
463  {
464  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
465  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
466  		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* try to cancel */
467  		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* try to cancel */
468  		[WORK_ST_EXEC] = WORK_ST_EXEC,		/* NOP */
469  		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
470  	};
471  	struct taskqueue *tq;
472  	bool cancelled;
473  
474  	mtx_lock(&dwork->timer.mtx);
475  	switch (linux_update_state(&dwork->work.state, states)) {
476  	case WORK_ST_TIMER:
477  	case WORK_ST_CANCEL:
478  		cancelled = (callout_stop(&dwork->timer.callout) == 1);
479  		if (cancelled) {
480  			atomic_cmpxchg(&dwork->work.state,
481  			    WORK_ST_CANCEL, WORK_ST_IDLE);
482  			mtx_unlock(&dwork->timer.mtx);
483  			return (true);
484  		}
485  		/* FALLTHROUGH */
486  	case WORK_ST_TASK:
487  		tq = dwork->work.work_queue->taskqueue;
488  		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
489  			atomic_cmpxchg(&dwork->work.state,
490  			    WORK_ST_CANCEL, WORK_ST_IDLE);
491  			mtx_unlock(&dwork->timer.mtx);
492  			return (true);
493  		}
494  		/* FALLTHROUGH */
495  	default:
496  		mtx_unlock(&dwork->timer.mtx);
497  		return (false);
498  	}
499  }
500  
501  /*
502   * This function cancels the given delayed work structure in a
503   * synchronous fashion. It returns true if the work was successfully
504   * cancelled. Otherwise the work was already cancelled.
505   */
506  static bool
507  linux_cancel_delayed_work_sync_int(struct delayed_work *dwork)
508  {
509  	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
510  		[WORK_ST_IDLE] = WORK_ST_IDLE,		/* NOP */
511  		[WORK_ST_TIMER] = WORK_ST_IDLE,		/* cancel and drain */
512  		[WORK_ST_TASK] = WORK_ST_IDLE,		/* cancel and drain */
513  		[WORK_ST_EXEC] = WORK_ST_IDLE,		/* too late, drain */
514  		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
515  	};
516  	struct taskqueue *tq;
517  	int ret, state;
518  	bool cancelled;
519  
520  	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
521  	    "linux_cancel_delayed_work_sync() might sleep");
522  	mtx_lock(&dwork->timer.mtx);
523  
524  	state = linux_update_state(&dwork->work.state, states);
525  	switch (state) {
526  	case WORK_ST_IDLE:
527  		mtx_unlock(&dwork->timer.mtx);
528  		return (false);
529  	case WORK_ST_TIMER:
530  	case WORK_ST_CANCEL:
531  		cancelled = (callout_stop(&dwork->timer.callout) == 1);
532  
533  		tq = dwork->work.work_queue->taskqueue;
534  		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
535  		mtx_unlock(&dwork->timer.mtx);
536  
537  		callout_drain(&dwork->timer.callout);
538  		taskqueue_drain(tq, &dwork->work.work_task);
539  		return (cancelled || (ret != 0));
540  	default:
541  		tq = dwork->work.work_queue->taskqueue;
542  		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
543  		mtx_unlock(&dwork->timer.mtx);
544  		if (ret != 0)
545  			taskqueue_drain(tq, &dwork->work.work_task);
546  		return (ret != 0);
547  	}
548  }
549  
550  bool
551  linux_cancel_delayed_work_sync(struct delayed_work *dwork)
552  {
553  	bool res;
554  
555  	res = false;
556  	while (linux_cancel_delayed_work_sync_int(dwork))
557  		res = true;
558  	return (res);
559  }
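
/*
 * Illustrative example (hypothetical detach path, not built): the
 * Linux-style cancel_work_sync()/cancel_delayed_work_sync() wrappers
 * map to the synchronous cancel routines above.  They may sleep, so
 * they must not be called from the work callback itself or while
 * holding a spinlock.
 */
#if 0	/* example only */
struct example_detach_softc {
	struct work_struct reset_work;
	struct delayed_work poll_work;
};

static void
example_detach(struct example_detach_softc *sc)
{
	/* stop the periodic poller; waits if its callback is running */
	cancel_delayed_work_sync(&sc->poll_work);

	/* stop the one-shot reset work as well */
	cancel_work_sync(&sc->reset_work);
}
#endif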
560  
561  /*
562   * This function waits until the given work structure is completed.
563   * It returns true if the work was successfully
564   * waited for. Otherwise the work was not waited for.
565   */
566  bool
567  linux_flush_work(struct work_struct *work)
568  {
569  	struct taskqueue *tq;
570  	bool retval;
571  
572  	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
573  	    "linux_flush_work() might sleep");
574  
575  	switch (atomic_read(&work->state)) {
576  	case WORK_ST_IDLE:
577  		return (false);
578  	default:
579  		tq = work->work_queue->taskqueue;
580  		retval = taskqueue_poll_is_busy(tq, &work->work_task);
581  		taskqueue_drain(tq, &work->work_task);
582  		return (retval);
583  	}
584  }
585  
586  /*
587   * This function waits until the given delayed work structure is
588   * completed. It returns true if the work was successfully waited
589   * for. Otherwise the work was not waited for.
590   */
591  bool
592  linux_flush_delayed_work(struct delayed_work *dwork)
593  {
594  	struct taskqueue *tq;
595  	bool retval;
596  
597  	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
598  	    "linux_flush_delayed_work() might sleep");
599  
600  	switch (atomic_read(&dwork->work.state)) {
601  	case WORK_ST_IDLE:
602  		return (false);
603  	case WORK_ST_TIMER:
604  		if (linux_cancel_timer(dwork, 1))
605  			linux_delayed_work_enqueue(dwork);
606  		/* FALLTHROUGH */
607  	default:
608  		tq = dwork->work.work_queue->taskqueue;
609  		retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
610  		taskqueue_drain(tq, &dwork->work.work_task);
611  		return (retval);
612  	}
613  }
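
/*
 * Illustrative example (hypothetical, not built): flush_work() and
 * flush_delayed_work() wait for any pending or running invocation to
 * finish (for delayed work, a pending timer is fired early) without
 * preventing future submissions.  Reuses the hypothetical softc from
 * the previous sketch.
 */
#if 0	/* example only */
static void
example_quiesce(struct example_detach_softc *sc)
{
	/* make sure any queued reset has completed before suspending */
	flush_work(&sc->reset_work);

	/* run a pending poll now instead of waiting for its timer */
	flush_delayed_work(&sc->poll_work);
}
#endif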
614  
615  /*
616   * This function returns true if the given work is pending and not
617   * yet executing.
618   */
619  bool
620  linux_work_pending(struct work_struct *work)
621  {
622  	switch (atomic_read(&work->state)) {
623  	case WORK_ST_TIMER:
624  	case WORK_ST_TASK:
625  	case WORK_ST_CANCEL:
626  		return (true);
627  	default:
628  		return (false);
629  	}
630  }
631  
632  /*
633   * This function returns true if the given work is busy.
634   */
635  bool
636  linux_work_busy(struct work_struct *work)
637  {
638  	struct taskqueue *tq;
639  
640  	switch (atomic_read(&work->state)) {
641  	case WORK_ST_IDLE:
642  		return (false);
643  	case WORK_ST_EXEC:
644  		tq = work->work_queue->taskqueue;
645  		return (taskqueue_poll_is_busy(tq, &work->work_task));
646  	default:
647  		return (true);
648  	}
649  }
650  
651  struct workqueue_struct *
652  linux_create_workqueue_common(const char *name, int cpus)
653  {
654  	struct workqueue_struct *wq;
655  
656  	/*
657  	 * If zero CPUs are specified, use the default number of CPUs.
658  	 */
659  	if (cpus == 0)
660  		cpus = linux_default_wq_cpus;
661  
662  	wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
663  	wq->taskqueue = taskqueue_create(name, M_WAITOK,
664  	    taskqueue_thread_enqueue, &wq->taskqueue);
665  	atomic_set(&wq->draining, 0);
666  	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
667  	TAILQ_INIT(&wq->exec_head);
668  	mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);
669  
670  	return (wq);
671  }
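
/*
 * Illustrative example (hypothetical, not built): creating a private
 * workqueue instead of using the shared system queues.  The
 * Linux-style alloc_workqueue() wrapper ends up in
 * linux_create_workqueue_common(), and destroy_workqueue() drains and
 * frees the queue again.
 */
#if 0	/* example only */
static struct workqueue_struct *example_wq;

static void
example_module_init(void)
{
	/* one service thread; 0 would select the default CPU count */
	example_wq = alloc_workqueue("example_wq", 0, 1);
}

static void
example_module_exit(void)
{
	destroy_workqueue(example_wq);
}
#endif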
672  
673  void
674  linux_destroy_workqueue(struct workqueue_struct *wq)
675  {
676  	atomic_inc(&wq->draining);
677  	drain_workqueue(wq);
678  	taskqueue_free(wq->taskqueue);
679  	mtx_destroy(&wq->exec_mtx);
680  	kfree(wq);
681  }
682  
683  void
684  linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
685  {
686  	memset(dwork, 0, sizeof(*dwork));
687  	dwork->work.func = func;
688  	TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
689  	mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
690  	    MTX_DEF | MTX_NOWITNESS);
691  	callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
692  }
693  
694  struct work_struct *
695  linux_current_work(void)
696  {
697  	return (current->work);
698  }
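
/*
 * Illustrative example (hypothetical, not built): assuming a
 * Linux-style current_work() wrapper around linux_current_work(), a
 * caller can detect whether it is already running from the work
 * callback, e.g. to avoid a self-deadlocking cancel_work_sync().
 * Reuses the hypothetical example_softc from an earlier sketch.
 */
#if 0	/* example only */
static void
example_stop(struct example_softc *sc)
{
	if (current_work() != &sc->reset_work)
		cancel_work_sync(&sc->reset_work);
}
#endif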
699  
700  static void
701  linux_work_init(void *arg)
702  {
703  	int max_wq_cpus = mp_ncpus + 1;
704  
705  	/* avoid deadlock when there are too few threads */
706  	if (max_wq_cpus < 4)
707  		max_wq_cpus = 4;
708  
709  	/* set default number of CPUs */
710  	linux_default_wq_cpus = max_wq_cpus;
711  
712  	linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
713  	linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);
714  
715  	/* populate the workqueue pointers */
716  	system_long_wq = linux_system_long_wq;
717  	system_wq = linux_system_short_wq;
718  	system_power_efficient_wq = linux_system_short_wq;
719  	system_unbound_wq = linux_system_short_wq;
720  	system_highpri_wq = linux_system_short_wq;
721  }
722  SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);
723  
724  static void
725  linux_work_uninit(void *arg)
726  {
727  	destroy_workqueue(linux_system_short_wq);
728  	destroy_workqueue(linux_system_long_wq);
729  
730  	/* clear workqueue pointers */
731  	system_long_wq = NULL;
732  	system_wq = NULL;
733  	system_power_efficient_wq = NULL;
734  	system_unbound_wq = NULL;
735  	system_highpri_wq = NULL;
736  }
737  SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);
738  
739  void
740  linux_irq_work_fn(void *context, int pending)
741  {
742  	struct irq_work *irqw = context;
743  
744  	irqw->func(irqw);
745  }
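
/*
 * Illustrative example (hypothetical, not built): LinuxKPI irq_work
 * items are serviced by the dedicated linux_irq_work_tq taskqueue via
 * linux_irq_work_fn() above.  A consumer initializes the item with
 * init_irq_work() and fires it with irq_work_queue() from a context
 * that must not sleep.
 */
#if 0	/* example only */
static struct irq_work example_iw;

static void
example_iw_fn(struct irq_work *iw)
{
	/* runs later on the linuxkpi_irq_wq thread */
}

static void
example_setup_and_fire(void)
{
	init_irq_work(&example_iw, example_iw_fn);
	irq_work_queue(&example_iw);
}
#endif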
746  
747  static void
748  linux_irq_work_init_fn(void *context, int pending)
749  {
750  	/*
751  	 * LinuxKPI performs lazy allocation of memory structures required by
752  	 * current on the first access to it.  As some irq_work clients read
753  	 * it with spinlock taken, we have to preallocate td_lkpi_task before
754  	 * first call to irq_work_queue().  As irq_work uses a single thread,
755  	 * it is enough to read current once at SYSINIT stage.
756  	 */
757  	if (current == NULL)
758  		panic("irq_work taskqueue is not initialized");
759  }
760  static struct task linux_irq_work_init_task =
761      TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);
762  
763  static void
764  linux_irq_work_init(void *arg)
765  {
766  	linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
767  	    M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
768  	taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
769  	    "linuxkpi_irq_wq");
770  	taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
771  }
772  SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
773      linux_irq_work_init, NULL);
774  
775  static void
776  linux_irq_work_uninit(void *arg)
777  {
778  	taskqueue_drain_all(linux_irq_work_tq);
779  	taskqueue_free(linux_irq_work_tq);
780  }
781  SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
782      linux_irq_work_uninit, NULL);
783