xref: /freebsd/sys/compat/linuxkpi/common/src/linux_tasklet.c (revision d93a896ef95946b0bf1219866fcb324b78543444)
1 /*-
2  * Copyright (c) 2017 Hans Petter Selasky
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/malloc.h>
32 #include <sys/gtaskqueue.h>
33 #include <sys/proc.h>
34 #include <sys/sched.h>
35 
36 #include <linux/interrupt.h>
37 #include <linux/compat.h>
38 
/*
 * Tasklet state machine.  The state is stored by overloading the
 * tasklet's TAILQ back-pointer, "entry.tqe_prev", with one of the
 * small integer values below:
 *
 *   IDLE - neither queued nor executing (also the all-zero initial
 *          state set up by tasklet_init())
 *   BUSY - claimed by tasklet_schedule() and about to be enqueued
 *   EXEC - callback is currently executing in tasklet_handler()
 *   LOOP - rescheduled while executing; handler runs the callback again
 *
 * While the tasklet is actually linked on a worker queue, tqe_prev
 * holds real list linkage (a kernel pointer), so none of the CMPSETs
 * against these constants can match - tasklet_schedule() relies on
 * that to detect the "already queued" case.
 */
#define	TASKLET_ST_IDLE 0
#define	TASKLET_ST_BUSY 1
#define	TASKLET_ST_EXEC 2
#define	TASKLET_ST_LOOP 3

/* Atomically transition the state from "old" to "new"; returns non-zero on success. */
#define	TASKLET_ST_CMPSET(ts, old, new)	\
	atomic_cmpset_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, old, new)

/* Unconditionally store "new" with release semantics. */
#define	TASKLET_ST_SET(ts, new)	\
	atomic_store_rel_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, new)

/* Read the current state with acquire semantics. */
#define	TASKLET_ST_GET(ts) \
	atomic_load_acq_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev)
52 
/*
 * Per-CPU tasklet worker: a mutex-protected queue of pending tasklets
 * drained by one grouptask ("softirq" thread) per CPU.  Cache-line
 * aligned to avoid false sharing between CPUs.
 */
struct tasklet_worker {
	struct mtx mtx;				/* protects "head" */
	TAILQ_HEAD(, tasklet_struct) head;	/* pending tasklets, FIFO */
	struct grouptask gtask;			/* per-CPU drain task */
} __aligned(CACHE_LINE_SIZE);

#define	TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define	TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

/* One worker instance per CPU (dynamic per-CPU storage). */
static DPCPU_DEFINE(struct tasklet_worker, tasklet_worker);
63 
/*
 * Grouptask callback that drains one CPU's tasklet queue.
 *
 * For each queued tasklet the worker lock is dropped while the
 * callback runs, so callbacks may themselves call tasklet_schedule().
 * A tasklet that reschedules itself while executing is moved to the
 * LOOP state by tasklet_schedule(); the inner do/while detects this
 * (the EXEC->IDLE CMPSET fails) and runs the callback again without
 * re-queueing.
 */
static void
tasklet_handler(void *arg)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;

	/* Set up the Linux-compatible current-thread context. */
	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		/* Drop the lock so the callback can reschedule tasklets. */
		TASKLET_WORKER_UNLOCK(tw);
		do {
			/* reset executing state */
			TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

			ts->func(ts->data);

			/* Loop again if we were moved to LOOP meanwhile. */
		} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_IDLE) == 0);
		TASKLET_WORKER_LOCK(tw);
	}
	TASKLET_WORKER_UNLOCK(tw);
}
91 
92 static void
93 tasklet_subsystem_init(void *arg __unused)
94 {
95 	struct tasklet_worker *tw;
96 	char buf[32];
97 	int i;
98 
99 	CPU_FOREACH(i) {
100 		if (CPU_ABSENT(i))
101 			continue;
102 
103 		tw = DPCPU_ID_PTR(i, tasklet_worker);
104 
105 		mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
106 		TAILQ_INIT(&tw->head);
107 		GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
108 		snprintf(buf, sizeof(buf), "softirq%d", i);
109 		taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
110 		    "tasklet", i, -1, buf);
111        }
112 }
113 SYSINIT(linux_tasklet, SI_SUB_INIT_IF, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
114 
115 static void
116 tasklet_subsystem_uninit(void *arg __unused)
117 {
118 	struct tasklet_worker *tw;
119 	int i;
120 
121 	CPU_FOREACH(i) {
122 		if (CPU_ABSENT(i))
123 			continue;
124 
125 		tw = DPCPU_ID_PTR(i, tasklet_worker);
126 
127 		taskqgroup_detach(qgroup_softirq, &tw->gtask);
128 		mtx_destroy(&tw->mtx);
129 	}
130 }
131 SYSUNINIT(linux_tasklet, SI_SUB_INIT_IF, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);
132 
133 void
134 tasklet_init(struct tasklet_struct *ts,
135     tasklet_func_t *func, unsigned long data)
136 {
137 	ts->entry.tqe_prev = NULL;
138 	ts->entry.tqe_next = NULL;
139 	ts->func = func;
140 	ts->data = data;
141 }
142 
/*
 * Linux local_bh_enable() emulation: release the CPU pin taken by
 * local_bh_disable().  sched_pin()/sched_unpin() nest, so paired
 * calls may be nested.
 */
void
local_bh_enable(void)
{
	sched_unpin();
}
148 
/*
 * Linux local_bh_disable() emulation: pin the current thread to its
 * CPU.  This does not block the per-CPU softirq worker from running,
 * it only prevents the calling thread from migrating.
 */
void
local_bh_disable(void)
{
	sched_pin();
}
154 
/*
 * Schedule a tasklet for execution on the current CPU's worker.
 *
 * Lock-free fast paths first: if the callback is currently executing
 * (EXEC), flip it to LOOP so tasklet_handler() re-runs it without
 * re-queueing.  Otherwise, if it is IDLE, claim it (BUSY), enqueue it
 * under the worker lock and kick the worker.  Any other observed
 * state means the tasklet is already queued or already set to loop,
 * so scheduling is a no-op (Linux tasklet semantics: a pending
 * tasklet is scheduled at most once).
 */
void
tasklet_schedule(struct tasklet_struct *ts)
{

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw;

		/* current CPU's worker; caller may be pinned via local_bh_disable() */
		tw = &DPCPU_GET(tasklet_worker);

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		GROUPTASK_ENQUEUE(&tw->gtask);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSET's above the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
		 * EXEC->LOOP transition was missed that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}
187 
/*
 * Wait until the tasklet has drained back to the IDLE state (neither
 * queued nor executing).  Polls with a 1-tick pause, so this may
 * sleep and must not be called from a non-sleepable context; the
 * WITNESS check below asserts that.  The caller is responsible for
 * preventing concurrent reschedules, or this may wait indefinitely.
 */
void
tasklet_kill(struct tasklet_struct *ts)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");

	/* wait until tasklet is no longer busy */
	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
		pause("W", 1);
}
198