/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <asm/atomic.h>

#include <sys/taskqueue.h>

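/*
 * LinuxKPI emulation of the Linux workqueue API on top of taskqueue(9):
 * a workqueue_struct wraps a taskqueue, a work_struct wraps a task, and
 * delayed work adds a callout whose handler (linux_delayed_work_fn())
 * enqueues the task when it fires.  The draining flag lets
 * drain_workqueue() reject new submissions.
 */
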
struct workqueue_struct {
	struct taskqueue	*taskqueue;
	atomic_t		draining;
};

struct work_struct {
	struct task		work_task;
	struct taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

typedef __typeof(((struct work_struct *)0)->fn) work_func_t;

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

extern void linux_work_fn(void *, int);
extern void linux_flush_fn(void *, int);
extern void linux_delayed_work_fn(void *);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void destroy_workqueue(struct workqueue_struct *);

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return (container_of(work, struct delayed_work, work));
}

#define	INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));	\
} while (0)

#define	INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	callout_init(&(_work)->timer, 1);				\
} while (0)

#define	INIT_DEFERRABLE_WORK(...) INIT_DELAYED_WORK(__VA_ARGS__)

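/*
 * Example (illustrative; my_softc and my_handler are hypothetical
 * consumer code, not part of this header):
 *
 *	struct my_softc {
 *		struct work_struct work;
 *	};
 *
 *	static void
 *	my_handler(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, work);
 *		...
 *	}
 *
 *	INIT_WORK(&sc->work, my_handler);
 *	schedule_work(&sc->work);
 */
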
#define	schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread;				\
	taskqueue_enqueue(taskqueue_thread, &(work)->work_task);	\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread)

static inline int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->taskqueue = wq->taskqueue;
	/* While draining, refuse new work and only report pending state. */
	if (atomic_read(&wq->draining) != 0)
		return (!work->work_task.ta_pending);
	/*
	 * Invert taskqueue_enqueue()'s return value (0 on success) to
	 * match Linux, where queue_work() returns true when the work
	 * was queued.
	 */
	return (!taskqueue_enqueue(wq->taskqueue, &work->work_task));
}

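/*
 * The delay is given in jiffies which, under LinuxKPI, are defined in
 * terms of FreeBSD ticks and can therefore be handed to callout_reset()
 * unchanged.
 */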
static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	work->work.taskqueue = wq->taskqueue;
	if (atomic_read(&wq->draining) != 0) {
		pending = work->work.work_task.ta_pending;
	} else if (delay != 0) {
		pending = work->work.work_task.ta_pending;
		callout_reset(&work->timer, delay, linux_delayed_work_fn, work);
	} else {
		callout_stop(&work->timer);
		pending = taskqueue_enqueue(work->work.taskqueue,
		    &work->work.work_task);
	}
	return (!pending);
}

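/*
 * Schedule delayed work on the default taskqueue by building a
 * temporary workqueue_struct on the stack; this is safe because
 * queue_delayed_work() copies the taskqueue pointer into the work
 * item and only reads the draining flag once.
 */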
static inline bool
schedule_delayed_work(struct delayed_work *dwork,
    unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread;
	atomic_set(&wq.draining, 0);
	return (queue_delayed_work(&wq, dwork, delay));
}

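/*
 * The Linux workqueue creation flags are ignored; only the level of
 * concurrency is honored, as the second argument of
 * linux_create_workqueue_common() selects the number of threads of the
 * backing taskqueue.  Ordered and single-threaded queues get one
 * thread, create_workqueue() one per CPU.
 */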
#define	create_singlethread_workqueue(name)				\
	linux_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	linux_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	linux_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	linux_create_workqueue_common(name, max_active)

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

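/*
 * Flush by enqueueing a no-op marker task (linux_flush_fn) and
 * draining it; taskqueue_drain() sleeps until the marker is neither
 * pending nor running.  PHOLD() keeps the current process from being
 * swapped out while it sleeps.
 */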
static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, linux_flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

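/*
 * Drain by blocking new submissions (the draining flag makes
 * queue_work() and queue_delayed_work() refuse work) and then flushing
 * what is already queued.  On Linux, running work may re-queue itself
 * during a drain and is flushed repeatedly; here any re-queueing is
 * simply refused.
 */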
static inline void
drain_workqueue(struct workqueue_struct *wq)
{
	atomic_inc(&wq->draining);
	flush_taskqueue(wq->taskqueue);
	atomic_dec(&wq->draining);
}

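/*
 * Cancel the task; taskqueue_cancel() returns non-zero if the handler
 * is currently running, in which case wait for it via
 * taskqueue_drain().  Always returns 0, whereas Linux returns true if
 * the work was pending.
 */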
static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return (0);
}

/*
 * This may leave work running on another CPU, as it does on Linux;
 * use cancel_delayed_work_sync() if the caller must wait for the
 * handler to finish.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	callout_stop(&work->timer);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return (0);
}

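/*
 * Synchronous cancel: callout_drain() waits out a timer that is
 * already firing, and taskqueue_drain() waits for a handler that is
 * already running.  Always returns 0 (Linux returns true if the work
 * was pending).
 */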
static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	callout_drain(&work->timer);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return (0);
}

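/*
 * Reschedule: cancel any pending timer/task and queue anew.  Always
 * returns false, whereas Linux returns true if the work was pending.
 */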
static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return (false);
}

#endif	/* _LINUX_WORKQUEUE_H_ */