1 /*-
2 * Copyright (c) 2017 Hans Petter Selasky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/types.h>
28 #include <sys/malloc.h>
29 #include <sys/gtaskqueue.h>
30 #include <sys/proc.h>
31 #include <sys/sched.h>
32
33 #include <linux/compiler.h>
34 #include <linux/interrupt.h>
35 #include <linux/compat.h>
36
/*
 * Per-tasklet state machine, stored in ts->tasklet_state and driven
 * entirely by atomic compare-and-set / plain atomic load-store below:
 *
 *   IDLE - not queued and not executing
 *   BUSY - queued on a worker list, execution pending
 *   EXEC - callback currently running in tasklet_handler()
 *   LOOP - rescheduled while executing; handler must run it again
 */
#define TASKLET_ST_IDLE 0
#define TASKLET_ST_BUSY 1
#define TASKLET_ST_EXEC 2
#define TASKLET_ST_LOOP 3

/* Atomically transition the state from "old" to "new"; non-zero on success. */
#define TASKLET_ST_CMPSET(ts, old, new) \
	atomic_cmpset_int((volatile u_int *)&(ts)->tasklet_state, old, new)

/* Unconditional atomic store of the state. */
#define TASKLET_ST_SET(ts, new) \
	WRITE_ONCE(*(volatile u_int *)&(ts)->tasklet_state, new)

/* Atomic load of the current state. */
#define TASKLET_ST_GET(ts) \
	READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)
50
/*
 * Per-CPU worker context: a mutex-protected list of pending tasklets
 * drained by one grouptask (see tasklet_handler()).  Cache-line aligned
 * to avoid false sharing between CPUs.
 */
struct tasklet_worker {
	struct mtx mtx;					/* protects "head" */
	TAILQ_HEAD(tasklet_list, tasklet_struct) head;	/* pending tasklets */
	struct grouptask gtask;				/* per-CPU drain task */
} __aligned(CACHE_LINE_SIZE);

#define	TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define	TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

/* One worker per CPU; selected by DPCPU_GET() in tasklet_schedule(). */
DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);
61
/*
 * Grouptask callback that drains one CPU's tasklet list.
 *
 * The worker lock is dropped around each callback invocation, so new
 * tasklets may be appended concurrently.  "last" is snapshotted up
 * front so that tasklets requeued below (because they are disabled)
 * terminate this pass instead of spinning forever.
 */
static void
tasklet_handler(void *arg)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;
	struct tasklet_struct *last;

	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	last = TAILQ_LAST(&tw->head, tasklet_list);
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		if (!atomic_read(&ts->count)) {
			/* tasklet is enabled; run it unlocked */
			TASKLET_WORKER_UNLOCK(tw);
			do {
				/* reset executing state */
				TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

				if (ts->use_callback)
					ts->callback(ts);
				else
					ts->func(ts->data);

				/*
				 * If tasklet_schedule() moved us from
				 * EXEC to LOOP meanwhile, this CMPSET
				 * fails and we run the callback again.
				 */
			} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC,
			    TASKLET_ST_IDLE) == 0);
			TASKLET_WORKER_LOCK(tw);
		} else {
			/* disabled (count != 0); retry on a later pass */
			TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		}
		/* stop after the last tasklet present when we started */
		if (ts == last)
			break;
	}
	TASKLET_WORKER_UNLOCK(tw);
}
101
102 static void
tasklet_subsystem_init(void * arg __unused)103 tasklet_subsystem_init(void *arg __unused)
104 {
105 struct tasklet_worker *tw;
106 char buf[32];
107 int i;
108
109 CPU_FOREACH(i) {
110 if (CPU_ABSENT(i))
111 continue;
112
113 tw = DPCPU_ID_PTR(i, tasklet_worker);
114
115 mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
116 TAILQ_INIT(&tw->head);
117 GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
118 snprintf(buf, sizeof(buf), "softirq%d", i);
119 taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
120 "tasklet", i, NULL, NULL, buf);
121 }
122 }
123 SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
124
125 static void
tasklet_subsystem_uninit(void * arg __unused)126 tasklet_subsystem_uninit(void *arg __unused)
127 {
128 struct tasklet_worker *tw;
129 int i;
130
131 taskqgroup_drain_all(qgroup_softirq);
132
133 CPU_FOREACH(i) {
134 if (CPU_ABSENT(i))
135 continue;
136
137 tw = DPCPU_ID_PTR(i, tasklet_worker);
138
139 taskqgroup_detach(qgroup_softirq, &tw->gtask);
140 mtx_destroy(&tw->mtx);
141 }
142 }
143 SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);
144
145 void
tasklet_init(struct tasklet_struct * ts,tasklet_func_t * func,unsigned long data)146 tasklet_init(struct tasklet_struct *ts,
147 tasklet_func_t *func, unsigned long data)
148 {
149 ts->entry.tqe_prev = NULL;
150 ts->entry.tqe_next = NULL;
151 ts->func = func;
152 ts->callback = NULL;
153 ts->data = data;
154 atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
155 atomic_set(&ts->count, 0);
156 ts->use_callback = false;
157 }
158
159 void
tasklet_setup(struct tasklet_struct * ts,tasklet_callback_t * c)160 tasklet_setup(struct tasklet_struct *ts, tasklet_callback_t *c)
161 {
162 ts->entry.tqe_prev = NULL;
163 ts->entry.tqe_next = NULL;
164 ts->func = NULL;
165 ts->callback = c;
166 ts->data = 0;
167 atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
168 atomic_set(&ts->count, 0);
169 ts->use_callback = true;
170 }
171
/*
 * Emulate Linux local_bh_enable(): undo the CPU pinning done by
 * local_bh_disable() below.
 */
void
local_bh_enable(void)
{
	sched_unpin();
}
177
/*
 * Emulate Linux local_bh_disable() by pinning the current thread to its
 * CPU.  NOTE(review): unlike Linux this does not block softirq/tasklet
 * execution on this CPU — it only prevents migration.
 */
void
local_bh_disable(void)
{
	sched_pin();
}
183
/*
 * Schedule a tasklet for execution on the current CPU's worker.
 * No-op when the tasklet is disabled (count != 0).  The two CMPSETs
 * below implement the IDLE/BUSY/EXEC/LOOP state machine shared with
 * tasklet_handler().
 */
void
tasklet_schedule(struct tasklet_struct *ts)
{

	/* tasklet is paused */
	if (atomic_read(&ts->count))
		return;

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw;

		tw = &DPCPU_GET(tasklet_worker);

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		GROUPTASK_ENQUEUE(&tw->gtask);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSET's above the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
		 * EXEC->LOOP transition was missed that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}
220
221 void
tasklet_kill(struct tasklet_struct * ts)222 tasklet_kill(struct tasklet_struct *ts)
223 {
224
225 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
226
227 /* wait until tasklet is no longer busy */
228 while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
229 pause("W", 1);
230 }
231
/*
 * Re-enable a tasklet by decrementing its disable count.  The tasklet
 * becomes schedulable again once count reaches zero.
 */
void
tasklet_enable(struct tasklet_struct *ts)
{

	atomic_dec(&ts->count);
}
238
/*
 * Disable a tasklet and wait for any in-progress execution to finish.
 * The count is raised first so tasklet_schedule() and tasklet_handler()
 * treat the tasklet as paused before we start waiting.  May sleep.
 */
void
tasklet_disable(struct tasklet_struct *ts)
{

	atomic_inc(&ts->count);
	tasklet_unlock_wait(ts);
}
246
/*
 * Disable a tasklet without waiting for a running callback to finish.
 * The compiler barrier orders the count increment ahead of subsequent
 * accesses by the caller.
 */
void
tasklet_disable_nosync(struct tasklet_struct *ts)
{
	atomic_inc(&ts->count);
	barrier();
}
253
/*
 * Try to claim the tasklet for execution by moving it from IDLE to
 * BUSY.  Returns non-zero on success, zero if the tasklet was not idle.
 */
int
tasklet_trylock(struct tasklet_struct *ts)
{

	return (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY));
}
260
/*
 * Release a tasklet claimed via tasklet_trylock() by forcing the state
 * back to IDLE (unconditional store, not a CMPSET).
 */
void
tasklet_unlock(struct tasklet_struct *ts)
{

	TASKLET_ST_SET(ts, TASKLET_ST_IDLE);
}
267
268 void
tasklet_unlock_wait(struct tasklet_struct * ts)269 tasklet_unlock_wait(struct tasklet_struct *ts)
270 {
271
272 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
273
274 /* wait until tasklet is no longer busy */
275 while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
276 pause("W", 1);
277 }
278