/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/gtaskqueue.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/compat.h>

#define	TASKLET_ST_IDLE		0
#define	TASKLET_ST_BUSY		1
#define	TASKLET_ST_EXEC		2
#define	TASKLET_ST_LOOP		3
#define	TASKLET_ST_PAUSED	4

#define	TASKLET_ST_CMPSET(ts, old, new)	\
	atomic_cmpset_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, old, new)

#define	TASKLET_ST_SET(ts, new)	\
	WRITE_ONCE(*(volatile uintptr_t *)&(ts)->entry.tqe_prev, new)

#define	TASKLET_ST_GET(ts) \
	READ_ONCE(*(volatile uintptr_t *)&(ts)->entry.tqe_prev)

struct tasklet_worker {
	struct mtx mtx;
	TAILQ_HEAD(, tasklet_struct) head;
	struct grouptask gtask;
} __aligned(CACHE_LINE_SIZE);

#define	TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define	TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);

static void
tasklet_handler(void *arg)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;

	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		TASKLET_WORKER_UNLOCK(tw);
		do {
			/* reset executing state */
			TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

			ts->func(ts->data);

		} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_IDLE) == 0);
		TASKLET_WORKER_LOCK(tw);
	}
	TASKLET_WORKER_UNLOCK(tw);
}

static void
tasklet_subsystem_init(void *arg __unused)
{
	struct tasklet_worker *tw;
	char buf[32];
	int i;

	CPU_FOREACH(i) {
		if (CPU_ABSENT(i))
			continue;

		tw = DPCPU_ID_PTR(i, tasklet_worker);

		mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
		TAILQ_INIT(&tw->head);
		GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
		snprintf(buf, sizeof(buf), "softirq%d", i);
		taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
		    "tasklet", i, -1, buf);
	}
}
SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);

static void
tasklet_subsystem_uninit(void *arg __unused)
{
	struct tasklet_worker *tw;
	int i;

	CPU_FOREACH(i) {
		if (CPU_ABSENT(i))
			continue;

		tw = DPCPU_ID_PTR(i, tasklet_worker);

		taskqgroup_detach(qgroup_softirq, &tw->gtask);
		mtx_destroy(&tw->mtx);
	}
}
SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);

void
tasklet_init(struct tasklet_struct *ts,
    tasklet_func_t *func, unsigned long data)
{
	ts->entry.tqe_prev = NULL;
	ts->entry.tqe_next = NULL;
	ts->func = func;
	ts->data = data;
}

void
local_bh_enable(void)
{
	sched_unpin();
}

void
local_bh_disable(void)
{
	sched_pin();
}

void
tasklet_schedule(struct tasklet_struct *ts)
{

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw;

		tw = &DPCPU_GET(tasklet_worker);

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		GROUPTASK_ENQUEUE(&tw->gtask);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSET's above the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
		 * EXEC->LOOP transition was missed that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}

void
tasklet_kill(struct tasklet_struct *ts)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");

	/* wait until tasklet is no longer busy */
	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
		pause("W", 1);
}

void
tasklet_enable(struct tasklet_struct *ts)
{
	(void) TASKLET_ST_CMPSET(ts, TASKLET_ST_PAUSED, TASKLET_ST_IDLE);
}

void
tasklet_disable(struct tasklet_struct *ts)
{
	while (1) {
		if (TASKLET_ST_GET(ts) == TASKLET_ST_PAUSED)
			break;
		if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_PAUSED))
			break;
		pause("W", 1);
	}
}