/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                   4 bits          4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *               16 bits          1 bit          4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_CACHE_SHARD,		/* synthetic sub-LLC shards */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` select pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__timer_init(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__timer_init_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power-consumption are identified and
	 * marked with this flag and enabling the power_efficient mode
	 * leads to noticeable power saving at the cost of small
	 * performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,
	WQ_PERCPU		= 1 << 8, /* bound to a specific cpu */

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 2048,	  /* I like 2048, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_percpu_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_percpu_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_dfl_long_wq is similar to system_dfl_wq but it may host long running
 * works.
 *
 * system_dfl_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_percpu_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_percpu_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interface to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;
extern struct workqueue_struct *system_dfl_long_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_\* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
#define alloc_workqueue(...)	alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))

/**
 * devm_alloc_workqueue - Resource-managed allocate a workqueue
 * @dev: Device to allocate workqueue for
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * Resource managed workqueue, see alloc_workqueue() for details.
 *
 * The workqueue will be automatically destroyed on driver detach. Typically
 * this should be used in drivers already relying on devm interfaces.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(2, 5) struct workqueue_struct *
devm_alloc_workqueue(struct device *dev, const char *fmt, unsigned int flags,
		     int max_active, ...);

#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
						1, lockdep_map, ##args))
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define devm_alloc_ordered_workqueue(dev, fmt, flags, args...)		\
	devm_alloc_workqueue(dev, fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

/* resolve the container struct of a work_struct passed to a work callback */
#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
#define alloc_workqueue_attrs(...)	alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))

void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_housekeeping_update(const struct cpumask *hk);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_percpu_wq, work);
}

/**
 * schedule_work - put work task in per-CPU workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the system per-CPU workqueue and
 * %true otherwise.
 *
 * This puts a job in the system per-CPU workqueue if it was not already
 * queued and leaves it in the same position on the system per-CPU
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_percpu_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}

/*
 * Detect attempt to flush system-wide workqueues at compile time when possible.
 * Warn attempt to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_percpu_wq);				\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_percpu_wq) &&		\
	     _wq == system_percpu_wq) ||				\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_dfl_long_wq) &&		\
	     _wq == system_dfl_long_wq) ||				\
	    (__builtin_constant_p(_wq == system_dfl_wq) &&		\
	     _wq == system_dfl_wq) ||					\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in per-CPU workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the system per-CPU
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in per-CPU workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the system per-CPU
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_percpu_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 *
 * NOTE(review): work_on_cpu_safe() is defined only in the !CONFIG_SMP
 * branch above; there is no SMP definition visible here. Confirm no
 * SMP caller uses work_on_cpu_safe() against this header.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif