/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

static void	*taskqueue_ih;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	int			tq_draining;
	struct mtx		tq_mutex;
};

static void	init_taskqueue_list(void *data);

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_draining = 0;
	mtx_init(&queue->tq_mutex, "taskqueue", MTX_DEF);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&queue->tq_mutex);
	queue->tq_draining = 1;
	mtx_unlock(&queue->tq_mutex);

	taskqueue_run(queue);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}
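
/*
 * Illustrative sketch, not part of the original file: one way a client
 * might create a private queue with taskqueue_create().  The enqueue
 * hook is called from taskqueue_enqueue() with tq_mutex held, so it
 * must not re-enter the queue; here it only wakes a consumer that is
 * assumed to call taskqueue_run() later.  All example_* identifiers
 * are hypothetical.
 */
#ifdef TASKQUEUE_EXAMPLE
static struct taskqueue *example_tq;

static void
example_tq_enqueue(void *context)
{
	/* Kick whoever sleeps on the queue pointer; don't run tasks here. */
	wakeup(context);
}

static void
example_tq_attach(void)
{
	example_tq = taskqueue_create("example", M_NOWAIT,
	    example_tq_enqueue, &example_tq);
}
#endif /* TASKQUEUE_EXAMPLE */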

/*
 * Returns with the taskqueue locked.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		mtx_lock(&queue->tq_mutex);
		if (!strcmp(queue->tq_name, name)) {
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
		mtx_unlock(&queue->tq_mutex);
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return 0;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock(&queue->tq_mutex);

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if (queue->tq_draining) {
		mtx_unlock(&queue->tq_mutex);
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock(&queue->tq_mutex);

	return 0;
}

void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
	}
	mtx_unlock(&queue->tq_mutex);
}

static void
taskqueue_swi_enqueue(void *context)
{
	/* Kick the software interrupt registered by TASKQUEUE_DEFINE below. */
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	/* Software interrupt handler: drain the pre-defined swi queue. */
	taskqueue_run(taskqueue_swi);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 0,
			 &taskqueue_ih));
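
/*
 * Illustrative sketch, not part of the original file: queueing work
 * onto the pre-defined swi queue above.  struct task, TASK_INIT() and
 * taskqueue_swi come from <sys/taskqueue.h>; the example_* identifiers
 * are hypothetical.  Note the task is initialised exactly once:
 * re-running TASK_INIT() on a queued task would clear ta_pending and
 * corrupt the queue.
 */
#ifdef TASKQUEUE_EXAMPLE
static struct task example_task;

static void
example_task_fn(void *context, int pending)
{
	/* "pending" counts how many enqueues were coalesced into this run. */
	printf("example task ran, pending=%d\n", pending);
}

static void
example_init(void)
{
	/* Priority 0; initialise once, then enqueue as often as needed. */
	TASK_INIT(&example_task, 0, example_task_fn, NULL);
}

static void
example_submit(void)
{
	/* Re-queueing before the task runs just bumps ta_pending. */
	taskqueue_enqueue(taskqueue_swi, &example_task);
}
#endif /* TASKQUEUE_EXAMPLE */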