xref: /freebsd/sys/compat/linuxkpi/common/include/linux/workqueue.h (revision 09285d143618d9679b9112b13942cffc76ab00cf)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_WORKQUEUE_H_
32 #define	_LINUX_WORKQUEUE_H_
33 
34 #include <linux/types.h>
35 #include <linux/kernel.h>
36 #include <linux/timer.h>
37 #include <linux/slab.h>
38 
39 #include <asm/atomic.h>
40 
41 #include <sys/param.h>
42 #include <sys/kernel.h>
43 #include <sys/taskqueue.h>
44 #include <sys/mutex.h>
45 
/*
 * CPU argument value meaning "no CPU affinity requested" when queueing
 * work; maps to MAXCPU on FreeBSD.
 */
#define	WORK_CPU_UNBOUND MAXCPU
/* alloc_workqueue() flags, accepted for Linux API compatibility. */
#define	WQ_UNBOUND (1 << 0)
#define	WQ_HIGHPRI (1 << 1)

struct work_struct;
/* Linux-style work callback: receives the work item being executed. */
typedef void (*work_func_t)(struct work_struct *);

/*
 * Record of a work item being tracked by a workqueue; the TAILQ_ENTRY
 * links it into workqueue_struct::exec_head.  Presumably one of these
 * lives on the worker's stack while "target" runs — confirm against
 * the linux_work_fn() implementation.
 */
struct work_exec {
	TAILQ_ENTRY(work_exec) entry;
	struct work_struct *target;
};
57 
/*
 * Workqueue: a thin wrapper around a FreeBSD taskqueue.  exec_mtx
 * protects exec_head, the list of work_exec records.  "draining" is
 * incremented for the duration of drain_workqueue() — presumably
 * checked on submission to refuse new work while draining; verify in
 * linux_queue_work_on().
 */
struct workqueue_struct {
	struct taskqueue *taskqueue;
	struct mtx exec_mtx;
	TAILQ_HEAD(, work_exec) exec_head;
	atomic_t draining;
};

/* Lock/unlock the exec_head list. */
#define	WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx)
#define	WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx)
67 
/*
 * A single unit of deferred work.  work_task is the FreeBSD taskqueue
 * task that actually executes; work_queue appears to record the queue
 * the item was submitted to (set to NULL by INIT_WORK()); "state" is
 * an atomic state word managed by the linux_work_* implementation.
 */
struct work_struct {
	struct task work_task;
	struct workqueue_struct *work_queue;
	work_func_t func;
	atomic_t state;
};

/*
 * Static initializer for a work item.  NOTE(review): unlike
 * INIT_WORK(), this does not TASK_INIT() work_task — presumably the
 * implementation sets it up on first queueing; confirm against
 * linux_queue_work_on().
 */
#define	DECLARE_WORK(name, fn) \
	struct work_struct name = { .func = (fn) }
77 
/*
 * Delayed work: a work item plus a callout-based timer that defers
 * submission to the workqueue.  "expires" holds the target time
 * (units not visible here — likely ticks; confirm in the workqueue
 * implementation); the mutex serializes callout operations.
 */
struct delayed_work {
	struct work_struct work;
	struct {
		struct callout callout;
		struct mtx mtx;
		int	expires;
	} timer;
};

/*
 * File-scope delayed work declaration.  Unlike DECLARE_WORK() this
 * cannot be a plain static initializer: the embedded callout and mutex
 * need runtime setup, so a SYSINIT invokes linux_init_delayed_work()
 * at SI_SUB_LOCK time.
 */
#define	DECLARE_DELAYED_WORK(name, fn)					\
	struct delayed_work name;					\
	static void name##_init(void *arg)				\
	{								\
		linux_init_delayed_work(&name, fn);			\
	}								\
	SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL)
94 
95 static inline struct delayed_work *
96 to_delayed_work(struct work_struct *work)
97 {
98 	return (container_of(work, struct delayed_work, work));
99 }
100 
/*
 * Runtime initialization of a plain work item: record the callback,
 * clear queue linkage and state, and hook the item up to the taskqueue
 * machinery via linux_work_fn().
 */
#define	INIT_WORK(work, fn) 	 					\
do {									\
	(work)->func = (fn);						\
	(work)->work_queue = NULL;					\
	atomic_set(&(work)->state, 0);					\
	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));	\
} while (0)

/* No stack-specific handling is needed in this port. */
#define	INIT_WORK_ONSTACK(work, fn) \
	INIT_WORK(work, fn)

/* Delayed work additionally needs its callout/mutex initialized. */
#define	INIT_DELAYED_WORK(dwork, fn) \
	linux_init_delayed_work(dwork, fn)

/* "Deferrable" timers are not distinguished from regular ones here. */
#define	INIT_DEFERRABLE_WORK(dwork, fn) \
	INIT_DELAYED_WORK(dwork, fn)
117 
/* Drain all tasks queued on the default system workqueue. */
#define	flush_scheduled_work() \
	taskqueue_drain_all(system_wq->taskqueue)

/* Queue work on a given workqueue, no CPU binding. */
#define	queue_work(wq, work) \
	linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)

/* Queue work on the default system workqueue. */
#define	schedule_work(work) \
	linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work)

/* Queue delayed work on a given workqueue, no CPU binding. */
#define	queue_delayed_work(wq, dwork, delay) \
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)

/* Queue delayed work on the system workqueue, bound to "cpu". */
#define	schedule_delayed_work_on(cpu, dwork, delay) \
	linux_queue_delayed_work_on(cpu, system_wq, dwork, delay)

/* Queue work on a given workqueue, bound to "cpu". */
#define	queue_work_on(cpu, wq, work) \
	linux_queue_work_on(cpu, wq, work)

/* Queue delayed work on the system workqueue, no CPU binding. */
#define	schedule_delayed_work(dwork, delay) \
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay)

/* Queue delayed work on a given workqueue, bound to "cpu". */
#define	queue_delayed_work_on(cpu, wq, dwork, delay) \
	linux_queue_delayed_work_on(cpu, wq, dwork, delay)

/* Single-threaded workqueue (max_active == 1). */
#define	create_singlethread_workqueue(name) \
	linux_create_workqueue_common(name, 1)

/* Multi-threaded workqueue: one slot per CPU. */
#define	create_workqueue(name) \
	linux_create_workqueue_common(name, mp_ncpus)

/* Ordered == single-threaded; "flags" is ignored by this port. */
#define	alloc_ordered_workqueue(name, flags) \
	linux_create_workqueue_common(name, 1)

/* "flags" is ignored; only max_active is honored. */
#define	alloc_workqueue(name, flags, max_active) \
	linux_create_workqueue_common(name, max_active)

/* Drain all tasks currently queued on "wq". */
#define	flush_workqueue(wq) \
	taskqueue_drain_all((wq)->taskqueue)
156 
/*
 * Drain the workqueue: raise the "draining" marker, wait for all
 * queued tasks via taskqueue_drain_all(), then clear the marker.
 *
 * Capture "wq" in a local so the argument expression is evaluated
 * exactly once (the original expanded it three times, a classic
 * macro multiple-evaluation hazard if the caller passes an
 * expression with side effects).
 */
#define	drain_workqueue(wq) do {			\
	struct workqueue_struct *__wq = (wq);		\
	atomic_inc(&__wq->draining);			\
	taskqueue_drain_all(__wq->taskqueue);		\
	atomic_dec(&__wq->draining);			\
} while (0)
162 
/*
 * Cancel any pending instance of the delayed work, then requeue it
 * with the new delay on "wq".  Evaluates to the result of the cancel
 * (true if something was actually pending and removed).
 *
 * Capture "dwork" in a local so the argument expression is evaluated
 * only once (the original expanded it twice).
 */
#define	mod_delayed_work(wq, dwork, delay) ({		\
	struct delayed_work *__dwork = (dwork);		\
	bool __retval;					\
	__retval = linux_cancel_delayed_work(__dwork);	\
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND,	\
	    wq, __dwork, delay);			\
	__retval;					\
})
170 
/* True if the delayed work's underlying work item is pending. */
#define	delayed_work_pending(dwork) \
	linux_work_pending(&(dwork)->work)

/* Cancel delayed work without waiting for a running callback. */
#define	cancel_delayed_work(dwork) \
	linux_cancel_delayed_work(dwork)

/* Cancel work and wait for any in-progress execution to finish. */
#define	cancel_work_sync(work) \
	linux_cancel_work_sync(work)

/* Cancel delayed work and wait for any in-progress execution. */
#define	cancel_delayed_work_sync(dwork) \
	linux_cancel_delayed_work_sync(dwork)

/* Wait for this specific work item to finish executing. */
#define	flush_work(work) \
	linux_flush_work(work)

/* Wait for this specific delayed work item to finish executing. */
#define	flush_delayed_work(dwork) \
	linux_flush_delayed_work(dwork)

/* Non-blocking pending check. */
#define	work_pending(work) \
	linux_work_pending(work)

/* Non-blocking busy (pending or running) check. */
#define	work_busy(work) \
	linux_work_busy(work)

/* On-stack work needs no teardown in this port; these are no-ops. */
#define	destroy_work_on_stack(work) \
	do { } while (0)

#define	destroy_delayed_work_on_stack(dwork) \
	do { } while (0)

/* Tear down a workqueue created by linux_create_workqueue_common(). */
#define	destroy_workqueue(wq) \
	linux_destroy_workqueue(wq)
203 
/* prototypes */

/*
 * Shared global workqueues, defined in the linuxkpi workqueue
 * implementation.  All the schedule_*() macros above target system_wq.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_power_efficient_wq;

/*
 * Backing functions for the macro API above; implemented in the
 * linuxkpi workqueue .c file.  The bool-returning cancel/flush
 * functions mirror the Linux API (see the callers' expectations).
 */
extern void linux_init_delayed_work(struct delayed_work *, work_func_t);
extern void linux_work_fn(void *, int);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void linux_destroy_workqueue(struct workqueue_struct *);
extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *);
extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *,
    struct delayed_work *, unsigned delay);
extern bool linux_cancel_delayed_work(struct delayed_work *);
extern bool linux_cancel_work_sync(struct work_struct *);
extern bool linux_cancel_delayed_work_sync(struct delayed_work *);
extern bool linux_flush_work(struct work_struct *);
extern bool linux_flush_delayed_work(struct delayed_work *);
extern bool linux_work_pending(struct work_struct *);
extern bool linux_work_busy(struct work_struct *);
225 
226 #endif					/* _LINUX_WORKQUEUE_H_ */
227