/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux tasklet emulation for FreeBSD (linuxkpi): tasklets are drained
 * by a single dedicated taskqueue thread instead of softirq context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <linux/interrupt.h>
#include <linux/bottom_half.h>
#include <linux/compat.h>

/*
 * Per-tasklet state machine:
 *
 * IDLE - neither queued nor executing
 * BUSY - linked on the worker's list, waiting to run
 * EXEC - callback is currently executing
 * LOOP - rescheduled while executing; run the callback once more
 */
#define TASKLET_ST_IDLE 0
#define TASKLET_ST_BUSY 1
#define TASKLET_ST_EXEC 2
#define TASKLET_ST_LOOP 3

/*
 * The state is stored in the tasklet's "entry.tqe_prev" pointer, which
 * is otherwise only needed while the tasklet is linked on the worker's
 * TAILQ.  NOTE(review): this relies on the small state values (0..3)
 * never colliding with a real tqe_prev pointer while unlinked; while
 * linked the field holds a list pointer and TASKLET_ST_GET() would not
 * read a pure state value — the CMPSETs below are written to tolerate
 * that.
 */

/* Atomically transition state "old" -> "new"; non-zero on success. */
#define TASKLET_ST_CMPSET(ts, old, new) \
	atomic_cmpset_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, old, new)

/* Unconditionally store a new state, with release semantics. */
#define TASKLET_ST_SET(ts, new) \
	atomic_store_rel_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, new)

/* Read the current state, with acquire semantics. */
#define TASKLET_ST_GET(ts) \
	atomic_load_acq_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev)

/*
 * Single global worker: one taskqueue thread draining the list of
 * scheduled tasklets.  "mtx" protects "head"; "task" is the handler
 * enqueued on "taskqueue".
 */
struct tasklet_worker {
	struct mtx mtx;
	TAILQ_HEAD(, tasklet_struct) head;
	struct taskqueue *taskqueue;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

#define TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

static struct tasklet_worker tasklet_worker;

/*
 * Taskqueue callback: drain every tasklet on the worker's list.
 *
 * Each tasklet is unlinked under the worker lock, then its callback is
 * invoked with the lock dropped.  The do/while loop re-runs the
 * callback whenever tasklet_schedule() moved the state EXEC -> LOOP
 * during execution: in that case the final CMPSET(EXEC -> IDLE) fails,
 * the state is reset to EXEC and the function runs again.
 */
static void
tasklet_handler(void *arg, int pending)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;

	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	local_bh_disable();	/* pin thread to CPU */
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		/* Drop the lock while running the callback. */
		TASKLET_WORKER_UNLOCK(tw);
		do {
			/* reset executing state */
			TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

			ts->func(ts->data);

		} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_IDLE) == 0);
		TASKLET_WORKER_LOCK(tw);
	}
	local_bh_enable();	/* unpin thread from CPU */
	TASKLET_WORKER_UNLOCK(tw);
}

/*
 * Module init: create the worker's taskqueue, lock and list, and start
 * a single worker thread at PI_NET priority.
 */
static void
tasklet_subsystem_init(void *arg __unused)
{
	struct tasklet_worker *tw = &tasklet_worker;

	tw->taskqueue = taskqueue_create("tasklet", M_WAITOK,
	    taskqueue_thread_enqueue, &tw->taskqueue);
	mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
	TAILQ_INIT(&tw->head);
	TASK_INIT(&tw->task, 0, tasklet_handler, tw);
	taskqueue_start_threads(&tw->taskqueue, 1, PI_NET, "tasklet");
}
SYSINIT(linux_tasklet, SI_SUB_INIT_IF, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);

/*
 * Module teardown: free the taskqueue (which drains pending tasks) and
 * destroy the worker lock.
 */
static void
tasklet_subsystem_uninit(void *arg __unused)
{
	struct tasklet_worker *tw = &tasklet_worker;

	taskqueue_free(tw->taskqueue);
	tw->taskqueue = NULL;
	mtx_destroy(&tw->mtx);
}
SYSUNINIT(linux_tasklet, SI_SUB_INIT_IF, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);

/*
 * Initialize a tasklet with its callback and argument.  Clearing
 * tqe_prev to NULL also sets the initial state, since NULL equals
 * TASKLET_ST_IDLE (0).
 */
void
tasklet_init(struct tasklet_struct *ts,
    tasklet_func_t *func, unsigned long data)
{
	ts->entry.tqe_prev = NULL;
	ts->entry.tqe_next = NULL;
	ts->func = func;
	ts->data = data;
}

/* Unpin the current thread from its CPU (see local_bh_disable()). */
void
local_bh_enable(void)
{
	sched_unpin();
}

/* Emulate Linux bottom-half disable by pinning the thread to its CPU. */
void
local_bh_disable(void)
{
	sched_pin();
}

/*
 * Schedule a tasklet for execution.  Safe to call concurrently with the
 * handler: depending on the current state this either asks an executing
 * handler to loop (EXEC -> LOOP), or enqueues the tasklet and kicks the
 * worker (IDLE -> BUSY).  In all other cases the callback is already
 * guaranteed to run (again), so nothing needs to be done.
 */
void
tasklet_schedule(struct tasklet_struct *ts)
{

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw = &tasklet_worker;

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		taskqueue_enqueue(tw->taskqueue, &tw->task);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSET's above the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
		 * EXEC->LOOP transition was missed that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}

/*
 * Wait (sleeping) until the tasklet has returned to the IDLE state.
 * May sleep; must not be called from the tasklet's own callback.
 * Polls at 1-tick intervals rather than blocking on a condition.
 */
void
tasklet_kill(struct tasklet_struct *ts)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");

	/* wait until tasklet is no longer busy */
	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
		pause("W", 1);
}