1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
#ifndef	_LINUXKPI_LINUX_WORKQUEUE_H_
#define	_LINUXKPI_LINUX_WORKQUEUE_H_

/*
 * LinuxKPI workqueue compatibility layer: emulates the Linux workqueue
 * API on top of FreeBSD's taskqueue(9) and callout(9) facilities.  The
 * linux_* helpers declared at the bottom are implemented elsewhere
 * (out of view of this header); comments on their behavior below are
 * limited to what the macros here demonstrate.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>

/* "Run on any CPU" sentinel; real CPU numbers are below MAXCPU. */
#define	WORK_CPU_UNBOUND MAXCPU
/* Workqueue creation flags (accepted for compatibility; see alloc_workqueue). */
#define	WQ_UNBOUND (1 << 0)
#define	WQ_HIGHPRI (1 << 1)

struct work_struct;
typedef void (*work_func_t)(struct work_struct *);

/*
 * Records a work item currently being executed; linked on the owning
 * workqueue's exec_head list (locked by exec_mtx).
 */
struct work_exec {
	TAILQ_ENTRY(work_exec) entry;
	struct work_struct *target;
};

struct workqueue_struct {
	struct taskqueue *taskqueue;	/* backing taskqueue(9) */
	struct mtx exec_mtx;		/* protects exec_head */
	TAILQ_HEAD(, work_exec) exec_head;	/* in-flight work items */
	atomic_t draining;		/* non-zero while drain_workqueue() runs */
};

#define	WQ_EXEC_LOCK(wq)	mtx_lock(&(wq)->exec_mtx)
#define	WQ_EXEC_UNLOCK(wq)	mtx_unlock(&(wq)->exec_mtx)

/* A deferred unit of work, backed by a taskqueue(9) task. */
struct work_struct {
	struct task work_task;		/* runs linux_work_fn (see INIT_WORK) */
	struct workqueue_struct *work_queue;	/* queue it was submitted to */
	work_func_t func;		/* caller-supplied work function */
	atomic_t state;			/* internal state; managed by linux_work.c */
};

/* Work item queued after an RCU grace period (see queue_rcu_work()). */
struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	struct workqueue_struct *wq;
};

/*
 * Statically declare a work item.  INIT_WORK() cannot run at static
 * initialization time, so the initialization is deferred to a SYSINIT.
 */
#define	DECLARE_WORK(name, fn)						\
	struct work_struct name;					\
	static void name##_init(void *arg)				\
	{								\
		INIT_WORK(&name, fn);					\
	}								\
	SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL)

/* Work item that fires after a timeout (callout-driven). */
struct delayed_work {
	struct work_struct work;
	struct {
		struct callout callout;
		struct mtx mtx;		/* protects the callout */
		int expires;		/* absolute expiry, in ticks presumably;
					 * set by linux_work.c — confirm there */
	} timer;
};

/* Statically declare a delayed work item; same SYSINIT pattern as above. */
#define	DECLARE_DELAYED_WORK(name, fn)					\
	struct delayed_work name;					\
	static void __linux_delayed_ ## name ## _init(void *arg)	\
	{								\
		linux_init_delayed_work(&name, fn);			\
	}								\
	SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND,			\
	    __linux_delayed_ ## name##_init, NULL)

/*
 * Recover the enclosing delayed_work from its embedded work_struct.
 * Only valid when 'work' really is the .work member of a delayed_work.
 */
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return (container_of(work, struct delayed_work, work));
}

/* Initialize a work item: bind the function and reset queue/state. */
#define	INIT_WORK(work, fn)						\
do {									\
	(work)->func = (fn);						\
	(work)->work_queue = NULL;					\
	atomic_set(&(work)->state, 0);					\
	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));	\
} while (0)

#define	INIT_RCU_WORK(_work, _fn) \
	INIT_WORK(&(_work)->work, (_fn))

/* On-stack variants are identical here; destroy_*_on_stack() are no-ops. */
#define	INIT_WORK_ONSTACK(work, fn) \
	INIT_WORK(work, fn)

#define	INIT_DELAYED_WORK(dwork, fn) \
	linux_init_delayed_work(dwork, fn)

#define	INIT_DELAYED_WORK_ONSTACK(dwork, fn) \
	linux_init_delayed_work(dwork, fn)

/* No deferrable-timer distinction in this emulation. */
#define	INIT_DEFERRABLE_WORK(dwork, fn) \
	INIT_DELAYED_WORK(dwork, fn)

/* Wait for everything on the system-wide queue to finish. */
#define	flush_scheduled_work() \
	taskqueue_drain_all(system_wq->taskqueue)

#define	queue_work(wq, work) \
	linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)

/* schedule_*() variants implicitly target the system-wide queue. */
#define	schedule_work(work) \
	linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work)

#define	queue_delayed_work(wq, dwork, delay) \
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)

#define	schedule_delayed_work_on(cpu, dwork, delay) \
	linux_queue_delayed_work_on(cpu, system_wq, dwork, delay)

#define	queue_work_on(cpu, wq, work) \
	linux_queue_work_on(cpu, wq, work)

#define	schedule_delayed_work(dwork, delay) \
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay)

#define	queue_delayed_work_on(cpu, wq, dwork, delay) \
	linux_queue_delayed_work_on(cpu, wq, dwork, delay)

#define	create_singlethread_workqueue(name) \
	linux_create_workqueue_common(name, 1)

/* One worker per CPU for the plain create_workqueue(). */
#define	create_workqueue(name) \
	linux_create_workqueue_common(name, mp_ncpus)

/* Ordered queues are emulated with a single worker; flags are ignored. */
#define	alloc_ordered_workqueue(name, flags) \
	linux_create_workqueue_common(name, 1)

/* NOTE(review): flags (WQ_UNBOUND/WQ_HIGHPRI) are not honored here. */
#define	alloc_workqueue(name, flags, max_active) \
	linux_create_workqueue_common(name, max_active)

#define	flush_workqueue(wq) \
	taskqueue_drain_all((wq)->taskqueue)

/*
 * Drain the queue while flagging it as draining so new submissions can
 * be rejected by the implementation for the duration.
 */
#define	drain_workqueue(wq) do {		\
	atomic_inc(&(wq)->draining);		\
	taskqueue_drain_all((wq)->taskqueue);	\
	atomic_dec(&(wq)->draining);		\
} while (0)

/*
 * Cancel-then-requeue.  Returns true if the work was pending before the
 * call (i.e. the cancel actually removed a queued instance), matching
 * the Linux mod_delayed_work() return convention.
 */
#define	mod_delayed_work(wq, dwork, delay) ({		\
	bool __retval;					\
	__retval = linux_cancel_delayed_work(dwork);	\
	linux_queue_delayed_work_on(WORK_CPU_UNBOUND,	\
	    wq, dwork, delay);				\
	__retval;					\
})

#define	delayed_work_pending(dwork) \
	linux_work_pending(&(dwork)->work)

#define	cancel_work(work) \
	linux_cancel_work(work)

#define	cancel_delayed_work(dwork) \
	linux_cancel_delayed_work(dwork)

/* *_sync variants also wait for a currently-running instance to finish. */
#define	cancel_work_sync(work) \
	linux_cancel_work_sync(work)

#define	cancel_delayed_work_sync(dwork) \
	linux_cancel_delayed_work_sync(dwork)

#define	flush_work(work) \
	linux_flush_work(work)

#define	queue_rcu_work(wq, rwork) \
	linux_queue_rcu_work(wq, rwork)

#define	flush_rcu_work(rwork) \
	linux_flush_rcu_work(rwork)

#define	flush_delayed_work(dwork) \
	linux_flush_delayed_work(dwork)

#define	work_pending(work) \
	linux_work_pending(work)

#define	work_busy(work) \
	linux_work_busy(work)

/* On-stack work needs no teardown in this emulation. */
#define	destroy_work_on_stack(work) \
	do { } while (0)

#define	destroy_delayed_work_on_stack(dwork) \
	do { } while (0)

#define	destroy_workqueue(wq) \
	linux_destroy_workqueue(wq)

#define	current_work() \
	linux_current_work()

/* prototypes */

/*
 * System-wide queues.  All five exist for API compatibility; whether
 * they alias the same underlying queue is decided by the implementation.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_power_efficient_wq;

extern void linux_init_delayed_work(struct delayed_work *, work_func_t);
extern void linux_work_fn(void *, int);
extern void linux_delayed_work_fn(void *, int);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void linux_destroy_workqueue(struct workqueue_struct *);
extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *);
extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *,
    struct delayed_work *, unsigned delay);
extern bool linux_cancel_work(struct work_struct *);
extern bool linux_cancel_delayed_work(struct delayed_work *);
extern bool linux_cancel_work_sync(struct work_struct *);
extern bool linux_cancel_delayed_work_sync(struct delayed_work *);
extern bool linux_flush_work(struct work_struct *);
extern bool linux_flush_delayed_work(struct delayed_work *);
extern bool linux_work_pending(struct work_struct *);
extern bool linux_work_busy(struct work_struct *);
extern struct work_struct *linux_current_work(void);
extern bool linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
extern bool linux_flush_rcu_work(struct rcu_work *rwork);

#endif					/* _LINUXKPI_LINUX_WORKQUEUE_H_ */