// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */
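
/*
 * A minimal caller-side usage sketch (the my_driver_* names are made up for
 * illustration; the calls are the public <linux/workqueue.h> API):
 *
 *	static void my_driver_workfn(struct work_struct *work)
 *	{
 *		... runs later in process context on a shared kworker ...
 *	}
 *	static DECLARE_WORK(my_driver_work, my_driver_workfn);
 *
 *	queue_work(system_unbound_wq, &my_driver_work);
 *	flush_work(&my_driver_work);	... wait for it to finish ...
 */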

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include "workqueue_internal.h"

enum worker_pool_flags {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 *
	 * As there can only be one concurrent BH execution context per CPU, a
	 * BH pool is per-CPU and always DISASSOCIATED.
	 */
	POOL_BH			= 1 << 0,	/* is a BH pool */
	POOL_MANAGER_ACTIVE	= 1 << 1,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	POOL_BH_DRAINING	= 1 << 3,	/* draining after CPU offline */
};

enum worker_flags {
	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,
};

enum work_cancel_flags {
	WORK_CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed_work */
};

enum wq_internal_consts {
	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 32,
};

/*
 * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
 * MAX_SOFTIRQ_RESTART in kernel/softirq.c.  These are macros because
 * msecs_to_jiffies() can't be an initializer.
 */
#define BH_WORKER_JIFFIES	msecs_to_jiffies(2)
#define BH_WORKER_RESTARTS	10

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
 *     reads.
 *
 * K: Only modified by worker while holding pool->lock.  Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
 *     with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, bits below WORK_PWQ_SHIFT
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	bool			plugged;	/* L: execution suspended */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
	 * nr_active and all work items in pwq->inactive_works are marked with
	 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
	 * in pwq->inactive_works. Some of them are ready to run in
	 * pool->worklist or worker->scheduled. Those work items are only struct
	 * wq_barrier which is used for flush_work() and should not participate
	 * in nr_active. For non-barrier work item, it is marked with
	 * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pending_node;	/* LN: node on wq_node_nr_active->pending_pwqs */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_PWQ_SHIFT);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * Unlike in a per-cpu workqueue where max_active limits its concurrency level
 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
 * As sharing a single nr_active across multiple sockets can be very expensive,
 * the counting and enforcement is per NUMA node.
 *
 * The following struct is used to enforce per-node max_active. When a pwq wants
 * to start executing a work item, it should increment ->nr using
 * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
 * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
 * round-robin order.
 */
struct wq_node_nr_active {
	int			max;		/* per-node max_active */
	atomic_t		nr;		/* per-node nr_active */
	raw_spinlock_t		lock;		/* nests inside pool locks */
	struct list_head	pending_pwqs;	/* LN: pwqs with inactive works */
};

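/*
 * A rough sketch of the flow described above (the real paths are
 * pwq_tryinc_nr_active(), pwq_dec_nr_active() and
 * node_activate_pending_pwq() below):
 *
 *	start:	if (tryinc_node_nr_active(nna))
 *			execute the work item now;
 *		else
 *			list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
 *
 *	finish:	decrement nna->nr and, if nna->pending_pwqs is non-empty,
 *		activate the next pending pwq in round-robin order.
 */
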
/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */

	/* See alloc_workqueue() function comment for info on min/max_active */
	int			max_active;	/* WO: max active works */
	int			min_active;	/* WO: min active works */
	int			saved_max_active; /* WQ: saved max_active */
	int			saved_min_active; /* WQ: saved min_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
static unsigned int wq_cpu_intensive_warning_thresh = 4;
module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
#endif

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;

static struct kmem_cache *pwq_cache;

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* to raise softirq for the BH worker pools on other CPUs */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
				     bh_pool_irq_works);

/* the BH worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     bh_worker_pools);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * Used to synchronize multiple cancel_sync attempts on the same work item. See
 * work_grab_pending() and __cancel_work_sync().
 */
static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq);

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;

struct workqueue_struct *system_wq __ro_after_init;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
struct workqueue_struct *system_bh_wq;
EXPORT_SYMBOL_GPL(system_bh_wq);
struct workqueue_struct *system_bh_highpri_wq;
EXPORT_SYMBOL_GPL(system_bh_highpri_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_bh_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

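/*
 * Callers pairing with the debug-objects hooks above typically follow the
 * hedged sketch below (my_onstack_fn is a made-up name; INIT_WORK_ONSTACK(),
 * flush_work() and destroy_work_on_stack() are the public API):
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, my_onstack_fn);
 *	queue_work(system_wq, &work);
 *	flush_work(&work);		... must finish before returning ...
 *	destroy_work_on_stack(&work);	... tells debugobjects it's gone ...
 */
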
/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

static struct pool_workqueue __rcu **
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
{
	if (cpu >= 0)
		return per_cpu_ptr(wq->cpu_pwq, cpu);
	else
		return &wq->dfl_pwq;
}

/* @cpu < 0 for dfl_pwq */
static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
{
	return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
				     lockdep_is_held(&wq_pool_mutex) ||
				     lockdep_is_held(&wq->mutex));
}

/**
 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
 * @wq: workqueue of interest
 *
 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
 * default pwq is always mapped to the pool with the current effective cpumask.
 */
static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
{
	return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling()
 * can be used to set the pwq, pool or clear work->data.  These functions should
 * only be called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
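/*
 * A rough picture of the two encodings described above (illustrative only;
 * the authoritative constants live in <linux/workqueue.h>):
 *
 *	queued:		data = pwq pointer | WORK_STRUCT_PWQ | other flag bits
 *	off queue:	data = (pool ID << WORK_OFFQ_POOL_SHIFT) | OFFQ flag bits
 */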
static inline void set_work_data(struct work_struct *work, unsigned long data)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long flags)
{
	set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
		      WORK_STRUCT_PWQ | flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id, unsigned long flags)
{
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      WORK_STRUCT_PENDING | flags);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id, unsigned long flags)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      flags);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                 LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

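/*
 * A worked instance of the check above, assuming MAX_IDLE_WORKERS_RATIO == 4:
 * with nr_busy == 8, nr_idle == 3 gives (3 - 2) * 4 = 4 < 8 and the idle
 * workers are kept, while nr_idle == 4 gives (4 - 2) * 4 = 8 >= 8 and the
 * idle timer / cull path may start retiring them.  Two idle workers are
 * always tolerated regardless of nr_busy.
 */
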
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		pool->nr_running--;
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	lockdep_assert_held(&pool->lock);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			pool->nr_running++;
}

/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/* Sanity check nr_running. */
	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.  See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker. If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work. This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker. %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}

static struct irq_work *bh_pool_irq_work(struct worker_pool *pool)
{
	int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0;

	return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
}

static void kick_bh_pool(struct worker_pool *pool)
{
#ifdef CONFIG_SMP
	/* see drain_dead_softirq_workfn() for BH_DRAINING */
	if (unlikely(pool->cpu != smp_processor_id() &&
		     !(pool->flags & POOL_BH_DRAINING))) {
		irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
		return;
	}
#endif
	if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
		raise_softirq_irqoff(HI_SOFTIRQ);
	else
		raise_softirq_irqoff(TASKLET_SOFTIRQ);
}

/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;

	lockdep_assert_held(&pool->lock);

	if (!need_more_worker(pool) || !worker)
		return false;

	if (pool->flags & POOL_BH) {
		kick_bh_pool(pool);
		return true;
	}

	p = worker->task;

#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that went idle the latest and there's some
	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
	 * so, setting the wake_cpu won't do anything. As this is a best-effort
	 * optimization and the race window is narrow, let's leave as-is for
	 * now. If this becomes pronounced, we can skip over workers which are
	 * still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
							  cpu_online_mask);
		if (wake_cpu < nr_cpu_ids) {
			p->wake_cpu = wake_cpu;
			get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
		}
	}
#endif
	wake_up_process(p);
	return true;
}

#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT

/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and report them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));

static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *ent;

	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
				   (unsigned long)func) {
		if (ent->func == func)
			return ent;
	}
	return NULL;
}

static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	ent = wci_find_ent(func);
	if (ent) {
		u64 cnt;

		/*
		 * Start reporting from the warning_thresh and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (wq_cpu_intensive_warning_thresh &&
		    cnt >= wq_cpu_intensive_warning_thresh &&
		    is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
					ent->func, wq_cpu_intensive_thresh_us,
					atomic64_read(&ent->cnt));
		return;
	}

	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 0);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);

	goto restart;
}

#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
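
/*
 * For example, with the default wq_cpu_intensive_warning_thresh of 4, the
 * check above fires when cnt + 1 - 4 is a power of two, i.e. at the 4th,
 * 5th, 7th, 11th, 19th, ... violation of a given work function, so a
 * persistent offender keeps getting reported but at a decaying rate.
 */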

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule()
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;

	if (!pwq)
		return;

	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;

	if (!wq_cpu_intensive_thresh_us)
		return;

	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * @worker->sleeping being set means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;

	if (kick_pool(pool))
		pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed.  This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	return worker->last_func;
}

/**
 * wq_node_nr_active - Determine wq_node_nr_active to use
 * @wq: workqueue of interest
 * @node: NUMA node, can be %NUMA_NO_NODE
 *
 * Determine wq_node_nr_active to use for @wq on @node. Returns:
 *
 * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
 *
 * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
 *
 * - Otherwise, node_nr_active[@node].
 */
static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
						   int node)
{
	if (!(wq->flags & WQ_UNBOUND))
		return NULL;

	if (node == NUMA_NO_NODE)
		node = nr_node_ids;

	return wq->node_nr_active[node];
}

/**
 * wq_update_node_max_active - Update per-node max_actives to use
 * @wq: workqueue to update
 * @off_cpu: CPU that's going down, -1 if a CPU is not going down
 *
 * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
 * distributed among nodes according to the proportions of numbers of online
 * cpus. The result is always between @wq->min_active and max_active.
 */
static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
{
	struct cpumask *effective = unbound_effective_cpumask(wq);
	int min_active = READ_ONCE(wq->min_active);
	int max_active = READ_ONCE(wq->max_active);
	int total_cpus, node;

	lockdep_assert_held(&wq->mutex);

	if (!wq_topo_initialized)
		return;

	if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
		off_cpu = -1;

	total_cpus = cpumask_weight_and(effective, cpu_online_mask);
	if (off_cpu >= 0)
		total_cpus--;

	/* If all CPUs of the wq get offline, use the default values */
	if (unlikely(!total_cpus)) {
		for_each_node(node)
			wq_node_nr_active(wq, node)->max = min_active;

		wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
		return;
	}

	for_each_node(node) {
		int node_cpus;

		node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
		if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
			node_cpus--;

		wq_node_nr_active(wq, node)->max =
			clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
			      min_active, max_active);
	}

	wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
}

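/*
 * A worked instance of the distribution above: with max_active == 16,
 * min_active == 8 and 4 effective online CPUs split 3/1 across two nodes,
 * node 0 gets clamp(DIV_ROUND_UP(16 * 3, 4), 8, 16) == 12 and node 1 gets
 * clamp(DIV_ROUND_UP(16 * 1, 4), 8, 16) == 8, so the per-node sum may exceed
 * max_active because of the min_active floor.
 */
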
1670 */ 1671 raw_spin_lock_irq(&pwq->pool->lock); 1672 put_pwq(pwq); 1673 raw_spin_unlock_irq(&pwq->pool->lock); 1674 } 1675 } 1676 1677 static bool pwq_is_empty(struct pool_workqueue *pwq) 1678 { 1679 return !pwq->nr_active && list_empty(&pwq->inactive_works); 1680 } 1681 1682 static void __pwq_activate_work(struct pool_workqueue *pwq, 1683 struct work_struct *work) 1684 { 1685 unsigned long *wdb = work_data_bits(work); 1686 1687 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); 1688 trace_workqueue_activate_work(work); 1689 if (list_empty(&pwq->pool->worklist)) 1690 pwq->pool->watchdog_ts = jiffies; 1691 move_linked_works(work, &pwq->pool->worklist, NULL); 1692 __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); 1693 } 1694 1695 /** 1696 * pwq_activate_work - Activate a work item if inactive 1697 * @pwq: pool_workqueue @work belongs to 1698 * @work: work item to activate 1699 * 1700 * Returns %true if activated. %false if already active. 1701 */ 1702 static bool pwq_activate_work(struct pool_workqueue *pwq, 1703 struct work_struct *work) 1704 { 1705 struct worker_pool *pool = pwq->pool; 1706 struct wq_node_nr_active *nna; 1707 1708 lockdep_assert_held(&pool->lock); 1709 1710 if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) 1711 return false; 1712 1713 nna = wq_node_nr_active(pwq->wq, pool->node); 1714 if (nna) 1715 atomic_inc(&nna->nr); 1716 1717 pwq->nr_active++; 1718 __pwq_activate_work(pwq, work); 1719 return true; 1720 } 1721 1722 static bool tryinc_node_nr_active(struct wq_node_nr_active *nna) 1723 { 1724 int max = READ_ONCE(nna->max); 1725 1726 while (true) { 1727 int old, tmp; 1728 1729 old = atomic_read(&nna->nr); 1730 if (old >= max) 1731 return false; 1732 tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1); 1733 if (tmp == old) 1734 return true; 1735 } 1736 } 1737 1738 /** 1739 * pwq_tryinc_nr_active - Try to increment nr_active for a pwq 1740 * @pwq: pool_workqueue of interest 1741 * @fill: max_active may have increased, try to increase concurrency level 1742 * 1743 * Try to increment nr_active for @pwq. Returns %true if an nr_active count is 1744 * successfully obtained. %false otherwise. 1745 */ 1746 static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) 1747 { 1748 struct workqueue_struct *wq = pwq->wq; 1749 struct worker_pool *pool = pwq->pool; 1750 struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); 1751 bool obtained = false; 1752 1753 lockdep_assert_held(&pool->lock); 1754 1755 if (!nna) { 1756 /* BH or per-cpu workqueue, pwq->nr_active is sufficient */ 1757 obtained = pwq->nr_active < READ_ONCE(wq->max_active); 1758 goto out; 1759 } 1760 1761 if (unlikely(pwq->plugged)) 1762 return false; 1763 1764 /* 1765 * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is 1766 * already waiting on $nna, pwq_dec_nr_active() will maintain the 1767 * concurrency level. Don't jump the line. 1768 * 1769 * We need to ignore the pending test after max_active has increased as 1770 * pwq_dec_nr_active() can only maintain the concurrency level but not 1771 * increase it. This is indicated by @fill. 1772 */ 1773 if (!list_empty(&pwq->pending_node) && likely(!fill)) 1774 goto out; 1775 1776 obtained = tryinc_node_nr_active(nna); 1777 if (obtained) 1778 goto out; 1779 1780 /* 1781 * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs 1782 * and try again. 
The smp_mb() is paired with the implied memory barrier 1783 * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either 1784 * we see the decremented $nna->nr or they see non-empty 1785 * $nna->pending_pwqs. 1786 */ 1787 raw_spin_lock(&nna->lock); 1788 1789 if (list_empty(&pwq->pending_node)) 1790 list_add_tail(&pwq->pending_node, &nna->pending_pwqs); 1791 else if (likely(!fill)) 1792 goto out_unlock; 1793 1794 smp_mb(); 1795 1796 obtained = tryinc_node_nr_active(nna); 1797 1798 /* 1799 * If @fill, @pwq might have already been pending. Being spuriously 1800 * pending in cold paths doesn't affect anything. Let's leave it be. 1801 */ 1802 if (obtained && likely(!fill)) 1803 list_del_init(&pwq->pending_node); 1804 1805 out_unlock: 1806 raw_spin_unlock(&nna->lock); 1807 out: 1808 if (obtained) 1809 pwq->nr_active++; 1810 return obtained; 1811 } 1812 1813 /** 1814 * pwq_activate_first_inactive - Activate the first inactive work item on a pwq 1815 * @pwq: pool_workqueue of interest 1816 * @fill: max_active may have increased, try to increase concurrency level 1817 * 1818 * Activate the first inactive work item of @pwq if available and allowed by 1819 * max_active limit. 1820 * 1821 * Returns %true if an inactive work item has been activated. %false if no 1822 * inactive work item is found or max_active limit is reached. 1823 */ 1824 static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) 1825 { 1826 struct work_struct *work = 1827 list_first_entry_or_null(&pwq->inactive_works, 1828 struct work_struct, entry); 1829 1830 if (work && pwq_tryinc_nr_active(pwq, fill)) { 1831 __pwq_activate_work(pwq, work); 1832 return true; 1833 } else { 1834 return false; 1835 } 1836 } 1837 1838 /** 1839 * unplug_oldest_pwq - unplug the oldest pool_workqueue 1840 * @wq: workqueue_struct where its oldest pwq is to be unplugged 1841 * 1842 * This function should only be called for ordered workqueues where only the 1843 * oldest pwq is unplugged, the others are plugged to suspend execution to 1844 * ensure proper work item ordering:: 1845 * 1846 * dfl_pwq --------------+ [P] - plugged 1847 * | 1848 * v 1849 * pwqs -> A -> B [P] -> C [P] (newest) 1850 * | | | 1851 * 1 3 5 1852 * | | | 1853 * 2 4 6 1854 * 1855 * When the oldest pwq is drained and removed, this function should be called 1856 * to unplug the next oldest one to start its work item execution. Note that 1857 * pwq's are linked into wq->pwqs with the oldest first, so the first one in 1858 * the list is the oldest. 1859 */ 1860 static void unplug_oldest_pwq(struct workqueue_struct *wq) 1861 { 1862 struct pool_workqueue *pwq; 1863 1864 lockdep_assert_held(&wq->mutex); 1865 1866 /* Caller should make sure that pwqs isn't empty before calling */ 1867 pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue, 1868 pwqs_node); 1869 raw_spin_lock_irq(&pwq->pool->lock); 1870 if (pwq->plugged) { 1871 pwq->plugged = false; 1872 if (pwq_activate_first_inactive(pwq, true)) 1873 kick_pool(pwq->pool); 1874 } 1875 raw_spin_unlock_irq(&pwq->pool->lock); 1876 } 1877 1878 /** 1879 * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active 1880 * @nna: wq_node_nr_active to activate a pending pwq for 1881 * @caller_pool: worker_pool the caller is locking 1882 * 1883 * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked. 1884 * @caller_pool may be unlocked and relocked to lock other worker_pools. 
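 *
 * In outline (a rough sketch, not a literal excerpt), the lock dance below is:
 *
 *	if (pwq->pool != locked_pool) {
 *		raw_spin_unlock(&locked_pool->lock);
 *		locked_pool = pwq->pool;
 *		if (!raw_spin_trylock(&locked_pool->lock)) {
 *			// nna->lock nests inside pool locks: drop, relock, retry
 *			raw_spin_unlock(&nna->lock);
 *			raw_spin_lock(&locked_pool->lock);
 *			raw_spin_lock(&nna->lock);
 *			goto retry;
 *		}
 *	}
 *
 * which keeps pool->lock acquired outside of nna->lock at all times.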
1885 */ 1886 static void node_activate_pending_pwq(struct wq_node_nr_active *nna, 1887 struct worker_pool *caller_pool) 1888 { 1889 struct worker_pool *locked_pool = caller_pool; 1890 struct pool_workqueue *pwq; 1891 struct work_struct *work; 1892 1893 lockdep_assert_held(&caller_pool->lock); 1894 1895 raw_spin_lock(&nna->lock); 1896 retry: 1897 pwq = list_first_entry_or_null(&nna->pending_pwqs, 1898 struct pool_workqueue, pending_node); 1899 if (!pwq) 1900 goto out_unlock; 1901 1902 /* 1903 * If @pwq is for a different pool than @locked_pool, we need to lock 1904 * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock 1905 * / lock dance. For that, we also need to release @nna->lock as it's 1906 * nested inside pool locks. 1907 */ 1908 if (pwq->pool != locked_pool) { 1909 raw_spin_unlock(&locked_pool->lock); 1910 locked_pool = pwq->pool; 1911 if (!raw_spin_trylock(&locked_pool->lock)) { 1912 raw_spin_unlock(&nna->lock); 1913 raw_spin_lock(&locked_pool->lock); 1914 raw_spin_lock(&nna->lock); 1915 goto retry; 1916 } 1917 } 1918 1919 /* 1920 * $pwq may not have any inactive work items due to e.g. cancellations. 1921 * Drop it from pending_pwqs and see if there's another one. 1922 */ 1923 work = list_first_entry_or_null(&pwq->inactive_works, 1924 struct work_struct, entry); 1925 if (!work) { 1926 list_del_init(&pwq->pending_node); 1927 goto retry; 1928 } 1929 1930 /* 1931 * Acquire an nr_active count and activate the inactive work item. If 1932 * $pwq still has inactive work items, rotate it to the end of the 1933 * pending_pwqs so that we round-robin through them. This means that 1934 * inactive work items are not activated in queueing order which is fine 1935 * given that there has never been any ordering across different pwqs. 1936 */ 1937 if (likely(tryinc_node_nr_active(nna))) { 1938 pwq->nr_active++; 1939 __pwq_activate_work(pwq, work); 1940 1941 if (list_empty(&pwq->inactive_works)) 1942 list_del_init(&pwq->pending_node); 1943 else 1944 list_move_tail(&pwq->pending_node, &nna->pending_pwqs); 1945 1946 /* if activating a foreign pool, make sure it's running */ 1947 if (pwq->pool != caller_pool) 1948 kick_pool(pwq->pool); 1949 } 1950 1951 out_unlock: 1952 raw_spin_unlock(&nna->lock); 1953 if (locked_pool != caller_pool) { 1954 raw_spin_unlock(&locked_pool->lock); 1955 raw_spin_lock(&caller_pool->lock); 1956 } 1957 } 1958 1959 /** 1960 * pwq_dec_nr_active - Retire an active count 1961 * @pwq: pool_workqueue of interest 1962 * 1963 * Decrement @pwq's nr_active and try to activate the first inactive work item. 1964 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock. 1965 */ 1966 static void pwq_dec_nr_active(struct pool_workqueue *pwq) 1967 { 1968 struct worker_pool *pool = pwq->pool; 1969 struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); 1970 1971 lockdep_assert_held(&pool->lock); 1972 1973 /* 1974 * @pwq->nr_active should be decremented for both percpu and unbound 1975 * workqueues. 1976 */ 1977 pwq->nr_active--; 1978 1979 /* 1980 * For a percpu workqueue, it's simple. Just need to kick the first 1981 * inactive work item on @pwq itself. 1982 */ 1983 if (!nna) { 1984 pwq_activate_first_inactive(pwq, false); 1985 return; 1986 } 1987 1988 /* 1989 * If @pwq is for an unbound workqueue, it's more complicated because 1990 * multiple pwqs and pools may be sharing the nr_active count. When a 1991 * pwq needs to wait for an nr_active count, it puts itself on 1992 * $nna->pending_pwqs. 
The following atomic_dec_return()'s implied 1993 * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to 1994 * guarantee that either we see non-empty pending_pwqs or they see 1995 * decremented $nna->nr. 1996 * 1997 * $nna->max may change as CPUs come online/offline and @pwq->wq's 1998 * max_active gets updated. However, it is guaranteed to be equal to or 1999 * larger than @pwq->wq->min_active which is above zero unless freezing. 2000 * This maintains the forward progress guarantee. 2001 */ 2002 if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max)) 2003 return; 2004 2005 if (!list_empty(&nna->pending_pwqs)) 2006 node_activate_pending_pwq(nna, pool); 2007 } 2008 2009 /** 2010 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 2011 * @pwq: pwq of interest 2012 * @work_data: work_data of work which left the queue 2013 * 2014 * A work either has completed or is removed from pending queue, 2015 * decrement nr_in_flight of its pwq and handle workqueue flushing. 2016 * 2017 * NOTE: 2018 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock 2019 * and thus should be called after all other state updates for the in-flight 2020 * work item is complete. 2021 * 2022 * CONTEXT: 2023 * raw_spin_lock_irq(pool->lock). 2024 */ 2025 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 2026 { 2027 int color = get_work_color(work_data); 2028 2029 if (!(work_data & WORK_STRUCT_INACTIVE)) 2030 pwq_dec_nr_active(pwq); 2031 2032 pwq->nr_in_flight[color]--; 2033 2034 /* is flush in progress and are we at the flushing tip? */ 2035 if (likely(pwq->flush_color != color)) 2036 goto out_put; 2037 2038 /* are there still in-flight works? */ 2039 if (pwq->nr_in_flight[color]) 2040 goto out_put; 2041 2042 /* this pwq is done, clear flush_color */ 2043 pwq->flush_color = -1; 2044 2045 /* 2046 * If this was the last pwq, wake up the first flusher. It 2047 * will handle the rest. 2048 */ 2049 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 2050 complete(&pwq->wq->first_flusher->done); 2051 out_put: 2052 put_pwq(pwq); 2053 } 2054 2055 /** 2056 * try_to_grab_pending - steal work item from worklist and disable irq 2057 * @work: work item to steal 2058 * @cflags: %WORK_CANCEL_ flags 2059 * @irq_flags: place to store irq state 2060 * 2061 * Try to grab PENDING bit of @work. This function can handle @work in any 2062 * stable state - idle, on timer or on worklist. 2063 * 2064 * Return: 2065 * 2066 * ======== ================================================================ 2067 * 1 if @work was pending and we successfully stole PENDING 2068 * 0 if @work was idle and we claimed PENDING 2069 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 2070 * -ENOENT if someone else is canceling @work, this state may persist 2071 * for arbitrarily long 2072 * ======== ================================================================ 2073 * 2074 * Note: 2075 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 2076 * interrupted while holding PENDING and @work off queue, irq must be 2077 * disabled on entry. This, combined with delayed_work->timer being 2078 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 2079 * 2080 * On successful return, >= 0, irq is disabled and the caller is 2081 * responsible for releasing it using local_irq_restore(*@irq_flags). 2082 * 2083 * This function is safe to call from any context including IRQ handler. 
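 *
 * For illustration (a rough sketch, not a literal excerpt): the expected
 * caller pattern, as in mod_delayed_work_on() further below, is to busy-retry
 * on -EAGAIN and only then act on the grabbed PENDING bit:
 *
 *	do {
 *		ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
 *					  &irq_flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (likely(ret >= 0)) {
 *		// PENDING is owned with IRQs disabled; requeue, then restore
 *		__queue_delayed_work(cpu, wq, dwork, delay);
 *		local_irq_restore(irq_flags);
 *	}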
2084 */ 2085 static int try_to_grab_pending(struct work_struct *work, u32 cflags, 2086 unsigned long *irq_flags) 2087 { 2088 struct worker_pool *pool; 2089 struct pool_workqueue *pwq; 2090 2091 local_irq_save(*irq_flags); 2092 2093 /* try to steal the timer if it exists */ 2094 if (cflags & WORK_CANCEL_DELAYED) { 2095 struct delayed_work *dwork = to_delayed_work(work); 2096 2097 /* 2098 * dwork->timer is irqsafe. If del_timer() fails, it's 2099 * guaranteed that the timer is not queued anywhere and not 2100 * running on the local CPU. 2101 */ 2102 if (likely(del_timer(&dwork->timer))) 2103 return 1; 2104 } 2105 2106 /* try to claim PENDING the normal way */ 2107 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2108 return 0; 2109 2110 rcu_read_lock(); 2111 /* 2112 * The queueing is in progress, or it is already queued. Try to 2113 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2114 */ 2115 pool = get_work_pool(work); 2116 if (!pool) 2117 goto fail; 2118 2119 raw_spin_lock(&pool->lock); 2120 /* 2121 * work->data is guaranteed to point to pwq only while the work 2122 * item is queued on pwq->wq, and both updating work->data to point 2123 * to pwq on queueing and to pool on dequeueing are done under 2124 * pwq->pool->lock. This in turn guarantees that, if work->data 2125 * points to pwq which is associated with a locked pool, the work 2126 * item is currently queued on that pool. 2127 */ 2128 pwq = get_work_pwq(work); 2129 if (pwq && pwq->pool == pool) { 2130 unsigned long work_data; 2131 2132 debug_work_deactivate(work); 2133 2134 /* 2135 * A cancelable inactive work item must be in the 2136 * pwq->inactive_works since a queued barrier can't be 2137 * canceled (see the comments in insert_wq_barrier()). 2138 * 2139 * An inactive work item cannot be grabbed directly because 2140 * it might have linked barrier work items which, if left 2141 * on the inactive_works list, will confuse pwq->nr_active 2142 * management later on and cause stall. Make sure the work 2143 * item is activated before grabbing. 2144 */ 2145 pwq_activate_work(pwq, work); 2146 2147 list_del_init(&work->entry); 2148 2149 /* 2150 * work->data points to pwq iff queued. Let's point to pool. As 2151 * this destroys work->data needed by the next step, stash it. 2152 */ 2153 work_data = *work_data_bits(work); 2154 set_work_pool_and_keep_pending(work, pool->id, 0); 2155 2156 /* must be the last step, see the function comment */ 2157 pwq_dec_nr_in_flight(pwq, work_data); 2158 2159 raw_spin_unlock(&pool->lock); 2160 rcu_read_unlock(); 2161 return 1; 2162 } 2163 raw_spin_unlock(&pool->lock); 2164 fail: 2165 rcu_read_unlock(); 2166 local_irq_restore(*irq_flags); 2167 if (work_is_canceling(work)) 2168 return -ENOENT; 2169 cpu_relax(); 2170 return -EAGAIN; 2171 } 2172 2173 struct cwt_wait { 2174 wait_queue_entry_t wait; 2175 struct work_struct *work; 2176 }; 2177 2178 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 2179 { 2180 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 2181 2182 if (cwait->work != key) 2183 return 0; 2184 return autoremove_wake_function(wait, mode, sync, key); 2185 } 2186 2187 /** 2188 * work_grab_pending - steal work item from worklist and disable irq 2189 * @work: work item to steal 2190 * @cflags: %WORK_CANCEL_ flags 2191 * @irq_flags: place to store IRQ state 2192 * 2193 * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer 2194 * or on worklist. 2195 * 2196 * Must be called in process context. 
IRQ is disabled on return with IRQ state 2197 * stored in *@irq_flags. The caller is responsible for re-enabling it using 2198 * local_irq_restore(). 2199 * 2200 * Returns %true if @work was pending. %false if idle. 2201 */ 2202 static bool work_grab_pending(struct work_struct *work, u32 cflags, 2203 unsigned long *irq_flags) 2204 { 2205 struct cwt_wait cwait; 2206 int ret; 2207 2208 might_sleep(); 2209 repeat: 2210 ret = try_to_grab_pending(work, cflags, irq_flags); 2211 if (likely(ret >= 0)) 2212 return ret; 2213 if (ret != -ENOENT) 2214 goto repeat; 2215 2216 /* 2217 * Someone is already canceling. Wait for it to finish. flush_work() 2218 * doesn't work for PREEMPT_NONE because we may get woken up between 2219 * @work's completion and the other canceling task resuming and clearing 2220 * CANCELING - flush_work() will return false immediately as @work is no 2221 * longer busy, try_to_grab_pending() will return -ENOENT as @work is 2222 * still being canceled and the other canceling task won't be able to 2223 * clear CANCELING as we're hogging the CPU. 2224 * 2225 * Let's wait for completion using a waitqueue. As this may lead to the 2226 * thundering herd problem, use a custom wake function which matches 2227 * @work along with exclusive wait and wakeup. 2228 */ 2229 init_wait(&cwait.wait); 2230 cwait.wait.func = cwt_wakefn; 2231 cwait.work = work; 2232 2233 prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait, 2234 TASK_UNINTERRUPTIBLE); 2235 if (work_is_canceling(work)) 2236 schedule(); 2237 finish_wait(&wq_cancel_waitq, &cwait.wait); 2238 2239 goto repeat; 2240 } 2241 2242 /** 2243 * insert_work - insert a work into a pool 2244 * @pwq: pwq @work belongs to 2245 * @work: work to insert 2246 * @head: insertion point 2247 * @extra_flags: extra WORK_STRUCT_* flags to set 2248 * 2249 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 2250 * work_struct flags. 2251 * 2252 * CONTEXT: 2253 * raw_spin_lock_irq(pool->lock). 2254 */ 2255 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 2256 struct list_head *head, unsigned int extra_flags) 2257 { 2258 debug_work_activate(work); 2259 2260 /* record the work call stack in order to print it in KASAN reports */ 2261 kasan_record_aux_stack_noalloc(work); 2262 2263 /* we own @work, set data and link */ 2264 set_work_pwq(work, pwq, extra_flags); 2265 list_add_tail(&work->entry, head); 2266 get_pwq(pwq); 2267 } 2268 2269 /* 2270 * Test whether @work is being queued from another work executing on the 2271 * same workqueue. 2272 */ 2273 static bool is_chained_work(struct workqueue_struct *wq) 2274 { 2275 struct worker *worker; 2276 2277 worker = current_wq_worker(); 2278 /* 2279 * Return %true iff I'm a worker executing a work item on @wq. If 2280 * I'm @worker, it's safe to dereference it without locking. 2281 */ 2282 return worker && worker->current_pwq->wq == wq; 2283 } 2284 2285 /* 2286 * When queueing an unbound work item to a wq, prefer local CPU if allowed 2287 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 2288 * avoid perturbing sensitive tasks. 
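 *
 * For example (illustrative numbers): with wq_unbound_cpumask spanning CPUs
 * 0-3 and the queueing CPU not in that mask, successive unbound queueings walk
 * 0, 1, 2, 3, 0, ... via wq_rr_cpu_last, falling back to the caller's CPU only
 * if no allowed CPU is online.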
2289 */ 2290 static int wq_select_unbound_cpu(int cpu) 2291 { 2292 int new_cpu; 2293 2294 if (likely(!wq_debug_force_rr_cpu)) { 2295 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 2296 return cpu; 2297 } else { 2298 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 2299 } 2300 2301 new_cpu = __this_cpu_read(wq_rr_cpu_last); 2302 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 2303 if (unlikely(new_cpu >= nr_cpu_ids)) { 2304 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 2305 if (unlikely(new_cpu >= nr_cpu_ids)) 2306 return cpu; 2307 } 2308 __this_cpu_write(wq_rr_cpu_last, new_cpu); 2309 2310 return new_cpu; 2311 } 2312 2313 static void __queue_work(int cpu, struct workqueue_struct *wq, 2314 struct work_struct *work) 2315 { 2316 struct pool_workqueue *pwq; 2317 struct worker_pool *last_pool, *pool; 2318 unsigned int work_flags; 2319 unsigned int req_cpu = cpu; 2320 2321 /* 2322 * While a work item is PENDING && off queue, a task trying to 2323 * steal the PENDING will busy-loop waiting for it to either get 2324 * queued or lose PENDING. Grabbing PENDING and queueing should 2325 * happen with IRQ disabled. 2326 */ 2327 lockdep_assert_irqs_disabled(); 2328 2329 /* 2330 * For a draining wq, only works from the same workqueue are 2331 * allowed. The __WQ_DESTROYING helps to spot the issue that 2332 * queues a new work item to a wq after destroy_workqueue(wq). 2333 */ 2334 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 2335 WARN_ON_ONCE(!is_chained_work(wq)))) 2336 return; 2337 rcu_read_lock(); 2338 retry: 2339 /* pwq which will be used unless @work is executing elsewhere */ 2340 if (req_cpu == WORK_CPU_UNBOUND) { 2341 if (wq->flags & WQ_UNBOUND) 2342 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 2343 else 2344 cpu = raw_smp_processor_id(); 2345 } 2346 2347 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 2348 pool = pwq->pool; 2349 2350 /* 2351 * If @work was previously on a different pool, it might still be 2352 * running there, in which case the work needs to be queued on that 2353 * pool to guarantee non-reentrancy. 2354 */ 2355 last_pool = get_work_pool(work); 2356 if (last_pool && last_pool != pool) { 2357 struct worker *worker; 2358 2359 raw_spin_lock(&last_pool->lock); 2360 2361 worker = find_worker_executing_work(last_pool, work); 2362 2363 if (worker && worker->current_pwq->wq == wq) { 2364 pwq = worker->current_pwq; 2365 pool = pwq->pool; 2366 WARN_ON_ONCE(pool != last_pool); 2367 } else { 2368 /* meh... not running there, queue here */ 2369 raw_spin_unlock(&last_pool->lock); 2370 raw_spin_lock(&pool->lock); 2371 } 2372 } else { 2373 raw_spin_lock(&pool->lock); 2374 } 2375 2376 /* 2377 * pwq is determined and locked. For unbound pools, we could have raced 2378 * with pwq release and it could already be dead. If its refcnt is zero, 2379 * repeat pwq selection. Note that unbound pwqs never die without 2380 * another pwq replacing it in cpu_pwq or while work items are executing 2381 * on it, so the retrying is guaranteed to make forward-progress. 
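 *
 * The typical way this race shows up (illustrative, not exhaustive): an
 * attribute change on an unbound workqueue installs new pwqs and drops the
 * base reference of the old ones, so the pwq looked up above may reach zero
 * refcnt between the rcu_dereference() and taking pool->lock.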
2382 */ 2383 if (unlikely(!pwq->refcnt)) { 2384 if (wq->flags & WQ_UNBOUND) { 2385 raw_spin_unlock(&pool->lock); 2386 cpu_relax(); 2387 goto retry; 2388 } 2389 /* oops */ 2390 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 2391 wq->name, cpu); 2392 } 2393 2394 /* pwq determined, queue */ 2395 trace_workqueue_queue_work(req_cpu, pwq, work); 2396 2397 if (WARN_ON(!list_empty(&work->entry))) 2398 goto out; 2399 2400 pwq->nr_in_flight[pwq->work_color]++; 2401 work_flags = work_color_to_flags(pwq->work_color); 2402 2403 /* 2404 * Limit the number of concurrently active work items to max_active. 2405 * @work must also queue behind existing inactive work items to maintain 2406 * ordering when max_active changes. See wq_adjust_max_active(). 2407 */ 2408 if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { 2409 if (list_empty(&pool->worklist)) 2410 pool->watchdog_ts = jiffies; 2411 2412 trace_workqueue_activate_work(work); 2413 insert_work(pwq, work, &pool->worklist, work_flags); 2414 kick_pool(pool); 2415 } else { 2416 work_flags |= WORK_STRUCT_INACTIVE; 2417 insert_work(pwq, work, &pwq->inactive_works, work_flags); 2418 } 2419 2420 out: 2421 raw_spin_unlock(&pool->lock); 2422 rcu_read_unlock(); 2423 } 2424 2425 /** 2426 * queue_work_on - queue work on specific cpu 2427 * @cpu: CPU number to execute work on 2428 * @wq: workqueue to use 2429 * @work: work to queue 2430 * 2431 * We queue the work to a specific CPU, the caller must ensure it 2432 * can't go away. Callers that fail to ensure that the specified 2433 * CPU cannot go away will execute on a randomly chosen CPU. 2434 * But note well that callers specifying a CPU that never has been 2435 * online will get a splat. 2436 * 2437 * Return: %false if @work was already on a queue, %true otherwise. 2438 */ 2439 bool queue_work_on(int cpu, struct workqueue_struct *wq, 2440 struct work_struct *work) 2441 { 2442 bool ret = false; 2443 unsigned long irq_flags; 2444 2445 local_irq_save(irq_flags); 2446 2447 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2448 __queue_work(cpu, wq, work); 2449 ret = true; 2450 } 2451 2452 local_irq_restore(irq_flags); 2453 return ret; 2454 } 2455 EXPORT_SYMBOL(queue_work_on); 2456 2457 /** 2458 * select_numa_node_cpu - Select a CPU based on NUMA node 2459 * @node: NUMA node ID that we want to select a CPU from 2460 * 2461 * This function will attempt to find a "random" cpu available on a given 2462 * node. If there are no CPUs available on the given node it will return 2463 * WORK_CPU_UNBOUND indicating that we should just schedule to any 2464 * available CPU if we need to schedule this work. 2465 */ 2466 static int select_numa_node_cpu(int node) 2467 { 2468 int cpu; 2469 2470 /* Delay binding to CPU if node is not valid or online */ 2471 if (node < 0 || node >= MAX_NUMNODES || !node_online(node)) 2472 return WORK_CPU_UNBOUND; 2473 2474 /* Use local node/cpu if we are already there */ 2475 cpu = raw_smp_processor_id(); 2476 if (node == cpu_to_node(cpu)) 2477 return cpu; 2478 2479 /* Use "random" otherwise know as "first" online CPU of node */ 2480 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); 2481 2482 /* If CPU is valid return that, otherwise just defer */ 2483 return cpu < nr_cpu_ids ? 
cpu : WORK_CPU_UNBOUND; 2484 } 2485 2486 /** 2487 * queue_work_node - queue work on a "random" cpu for a given NUMA node 2488 * @node: NUMA node that we are targeting the work for 2489 * @wq: workqueue to use 2490 * @work: work to queue 2491 * 2492 * We queue the work to a "random" CPU within a given NUMA node. The basic 2493 * idea here is to provide a way to somehow associate work with a given 2494 * NUMA node. 2495 * 2496 * This function will only make a best effort attempt at getting this onto 2497 * the right NUMA node. If no node is requested or the requested node is 2498 * offline then we just fall back to standard queue_work behavior. 2499 * 2500 * Currently the "random" CPU ends up being the first available CPU in the 2501 * intersection of cpu_online_mask and the cpumask of the node, unless we 2502 * are running on the node. In that case we just use the current CPU. 2503 * 2504 * Return: %false if @work was already on a queue, %true otherwise. 2505 */ 2506 bool queue_work_node(int node, struct workqueue_struct *wq, 2507 struct work_struct *work) 2508 { 2509 unsigned long irq_flags; 2510 bool ret = false; 2511 2512 /* 2513 * This current implementation is specific to unbound workqueues. 2514 * Specifically we only return the first available CPU for a given 2515 * node instead of cycling through individual CPUs within the node. 2516 * 2517 * If this is used with a per-cpu workqueue then the logic in 2518 * workqueue_select_cpu_near would need to be updated to allow for 2519 * some round robin type logic. 2520 */ 2521 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 2522 2523 local_irq_save(irq_flags); 2524 2525 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2526 int cpu = select_numa_node_cpu(node); 2527 2528 __queue_work(cpu, wq, work); 2529 ret = true; 2530 } 2531 2532 local_irq_restore(irq_flags); 2533 return ret; 2534 } 2535 EXPORT_SYMBOL_GPL(queue_work_node); 2536 2537 void delayed_work_timer_fn(struct timer_list *t) 2538 { 2539 struct delayed_work *dwork = from_timer(dwork, t, timer); 2540 2541 /* should have been called from irqsafe timer with irq already off */ 2542 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2543 } 2544 EXPORT_SYMBOL(delayed_work_timer_fn); 2545 2546 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 2547 struct delayed_work *dwork, unsigned long delay) 2548 { 2549 struct timer_list *timer = &dwork->timer; 2550 struct work_struct *work = &dwork->work; 2551 2552 WARN_ON_ONCE(!wq); 2553 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 2554 WARN_ON_ONCE(timer_pending(timer)); 2555 WARN_ON_ONCE(!list_empty(&work->entry)); 2556 2557 /* 2558 * If @delay is 0, queue @dwork->work immediately. This is for 2559 * both optimization and correctness. The earliest @timer can 2560 * expire is on the closest next tick and delayed_work users depend 2561 * on that there's no such delay when @delay is 0. 2562 */ 2563 if (!delay) { 2564 __queue_work(cpu, wq, &dwork->work); 2565 return; 2566 } 2567 2568 dwork->wq = wq; 2569 dwork->cpu = cpu; 2570 timer->expires = jiffies + delay; 2571 2572 if (housekeeping_enabled(HK_TYPE_TIMER)) { 2573 /* If the current cpu is a housekeeping cpu, use it. 
*/ 2574 cpu = smp_processor_id(); 2575 if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER)) 2576 cpu = housekeeping_any_cpu(HK_TYPE_TIMER); 2577 add_timer_on(timer, cpu); 2578 } else { 2579 if (likely(cpu == WORK_CPU_UNBOUND)) 2580 add_timer_global(timer); 2581 else 2582 add_timer_on(timer, cpu); 2583 } 2584 } 2585 2586 /** 2587 * queue_delayed_work_on - queue work on specific CPU after delay 2588 * @cpu: CPU number to execute work on 2589 * @wq: workqueue to use 2590 * @dwork: work to queue 2591 * @delay: number of jiffies to wait before queueing 2592 * 2593 * Return: %false if @work was already on a queue, %true otherwise. If 2594 * @delay is zero and @dwork is idle, it will be scheduled for immediate 2595 * execution. 2596 */ 2597 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 2598 struct delayed_work *dwork, unsigned long delay) 2599 { 2600 struct work_struct *work = &dwork->work; 2601 bool ret = false; 2602 unsigned long irq_flags; 2603 2604 /* read the comment in __queue_work() */ 2605 local_irq_save(irq_flags); 2606 2607 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2608 __queue_delayed_work(cpu, wq, dwork, delay); 2609 ret = true; 2610 } 2611 2612 local_irq_restore(irq_flags); 2613 return ret; 2614 } 2615 EXPORT_SYMBOL(queue_delayed_work_on); 2616 2617 /** 2618 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 2619 * @cpu: CPU number to execute work on 2620 * @wq: workqueue to use 2621 * @dwork: work to queue 2622 * @delay: number of jiffies to wait before queueing 2623 * 2624 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2625 * modify @dwork's timer so that it expires after @delay. If @delay is 2626 * zero, @work is guaranteed to be scheduled immediately regardless of its 2627 * current state. 2628 * 2629 * Return: %false if @dwork was idle and queued, %true if @dwork was 2630 * pending and its timer was modified. 2631 * 2632 * This function is safe to call from any context including IRQ handler. 2633 * See try_to_grab_pending() for details. 2634 */ 2635 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2636 struct delayed_work *dwork, unsigned long delay) 2637 { 2638 unsigned long irq_flags; 2639 int ret; 2640 2641 do { 2642 ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, 2643 &irq_flags); 2644 } while (unlikely(ret == -EAGAIN)); 2645 2646 if (likely(ret >= 0)) { 2647 __queue_delayed_work(cpu, wq, dwork, delay); 2648 local_irq_restore(irq_flags); 2649 } 2650 2651 /* -ENOENT from try_to_grab_pending() becomes %true */ 2652 return ret; 2653 } 2654 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2655 2656 static void rcu_work_rcufn(struct rcu_head *rcu) 2657 { 2658 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2659 2660 /* read the comment in __queue_work() */ 2661 local_irq_disable(); 2662 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2663 local_irq_enable(); 2664 } 2665 2666 /** 2667 * queue_rcu_work - queue work after a RCU grace period 2668 * @wq: workqueue to use 2669 * @rwork: work to queue 2670 * 2671 * Return: %false if @rwork was already pending, %true otherwise. Note 2672 * that a full RCU grace period is guaranteed only after a %true return. 2673 * While @rwork is guaranteed to be executed after a %false return, the 2674 * execution may happen before a full RCU grace period has passed. 
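 *
 * For illustration (a rough sketch with hypothetical names, not a literal
 * excerpt): typical usage pairs an embedded rcu_work with INIT_RCU_WORK():
 *
 *	static void my_obj_free_workfn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_obj_free_workfn);
 *	queue_rcu_work(system_wq, &obj->rwork);	// runs after a grace period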
2675 */ 2676 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2677 { 2678 struct work_struct *work = &rwork->work; 2679 2680 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2681 rwork->wq = wq; 2682 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2683 return true; 2684 } 2685 2686 return false; 2687 } 2688 EXPORT_SYMBOL(queue_rcu_work); 2689 2690 static struct worker *alloc_worker(int node) 2691 { 2692 struct worker *worker; 2693 2694 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2695 if (worker) { 2696 INIT_LIST_HEAD(&worker->entry); 2697 INIT_LIST_HEAD(&worker->scheduled); 2698 INIT_LIST_HEAD(&worker->node); 2699 /* on creation a worker is in !idle && prep state */ 2700 worker->flags = WORKER_PREP; 2701 } 2702 return worker; 2703 } 2704 2705 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2706 { 2707 if (pool->cpu < 0 && pool->attrs->affn_strict) 2708 return pool->attrs->__pod_cpumask; 2709 else 2710 return pool->attrs->cpumask; 2711 } 2712 2713 /** 2714 * worker_attach_to_pool() - attach a worker to a pool 2715 * @worker: worker to be attached 2716 * @pool: the target pool 2717 * 2718 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2719 * cpu-binding of @worker are kept coordinated with the pool across 2720 * cpu-[un]hotplugs. 2721 */ 2722 static void worker_attach_to_pool(struct worker *worker, 2723 struct worker_pool *pool) 2724 { 2725 mutex_lock(&wq_pool_attach_mutex); 2726 2727 /* 2728 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains stable 2729 * across this function. See the comments above the flag definition for 2730 * details. BH workers are, while per-CPU, always DISASSOCIATED. 2731 */ 2732 if (pool->flags & POOL_DISASSOCIATED) { 2733 worker->flags |= WORKER_UNBOUND; 2734 } else { 2735 WARN_ON_ONCE(pool->flags & POOL_BH); 2736 kthread_set_per_cpu(worker->task, pool->cpu); 2737 } 2738 2739 if (worker->rescue_wq) 2740 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); 2741 2742 list_add_tail(&worker->node, &pool->workers); 2743 worker->pool = pool; 2744 2745 mutex_unlock(&wq_pool_attach_mutex); 2746 } 2747 2748 /** 2749 * worker_detach_from_pool() - detach a worker from its pool 2750 * @worker: worker which is attached to its pool 2751 * 2752 * Undo the attaching which had been done in worker_attach_to_pool(). The 2753 * caller worker shouldn't access to the pool after detached except it has 2754 * other reference to the pool. 2755 */ 2756 static void worker_detach_from_pool(struct worker *worker) 2757 { 2758 struct worker_pool *pool = worker->pool; 2759 struct completion *detach_completion = NULL; 2760 2761 /* there is one permanent BH worker per CPU which should never detach */ 2762 WARN_ON_ONCE(pool->flags & POOL_BH); 2763 2764 mutex_lock(&wq_pool_attach_mutex); 2765 2766 kthread_set_per_cpu(worker->task, -1); 2767 list_del(&worker->node); 2768 worker->pool = NULL; 2769 2770 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) 2771 detach_completion = pool->detach_completion; 2772 mutex_unlock(&wq_pool_attach_mutex); 2773 2774 /* clear leftover flags without pool->lock after it is detached */ 2775 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2776 2777 if (detach_completion) 2778 complete(detach_completion); 2779 } 2780 2781 /** 2782 * create_worker - create a new workqueue worker 2783 * @pool: pool the new worker will belong to 2784 * 2785 * Create and start a new worker which is attached to @pool. 
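 *
 * The kthread is named from the pool and worker IDs using the snprintf()
 * formats below; for example (illustrative IDs), worker 5 of CPU 3's highpri
 * pool becomes "kworker/3:5H" and worker 7 of unbound pool 12 becomes
 * "kworker/u12:7".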
2786 * 2787 * CONTEXT: 2788 * Might sleep. Does GFP_KERNEL allocations. 2789 * 2790 * Return: 2791 * Pointer to the newly created worker. 2792 */ 2793 static struct worker *create_worker(struct worker_pool *pool) 2794 { 2795 struct worker *worker; 2796 int id; 2797 char id_buf[23]; 2798 2799 /* ID is needed to determine kthread name */ 2800 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); 2801 if (id < 0) { 2802 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n", 2803 ERR_PTR(id)); 2804 return NULL; 2805 } 2806 2807 worker = alloc_worker(pool->node); 2808 if (!worker) { 2809 pr_err_once("workqueue: Failed to allocate a worker\n"); 2810 goto fail; 2811 } 2812 2813 worker->id = id; 2814 2815 if (!(pool->flags & POOL_BH)) { 2816 if (pool->cpu >= 0) 2817 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2818 pool->attrs->nice < 0 ? "H" : ""); 2819 else 2820 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2821 2822 worker->task = kthread_create_on_node(worker_thread, worker, 2823 pool->node, "kworker/%s", id_buf); 2824 if (IS_ERR(worker->task)) { 2825 if (PTR_ERR(worker->task) == -EINTR) { 2826 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", 2827 id_buf); 2828 } else { 2829 pr_err_once("workqueue: Failed to create a worker thread: %pe", 2830 worker->task); 2831 } 2832 goto fail; 2833 } 2834 2835 set_user_nice(worker->task, pool->attrs->nice); 2836 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); 2837 } 2838 2839 /* successful, attach the worker to the pool */ 2840 worker_attach_to_pool(worker, pool); 2841 2842 /* start the newly created worker */ 2843 raw_spin_lock_irq(&pool->lock); 2844 2845 worker->pool->nr_workers++; 2846 worker_enter_idle(worker); 2847 2848 /* 2849 * @worker is waiting on a completion in kthread() and will trigger hung 2850 * check if not woken up soon. As kick_pool() is noop if @pool is empty, 2851 * wake it up explicitly. 2852 */ 2853 if (worker->task) 2854 wake_up_process(worker->task); 2855 2856 raw_spin_unlock_irq(&pool->lock); 2857 2858 return worker; 2859 2860 fail: 2861 ida_free(&pool->worker_ida, id); 2862 kfree(worker); 2863 return NULL; 2864 } 2865 2866 static void unbind_worker(struct worker *worker) 2867 { 2868 lockdep_assert_held(&wq_pool_attach_mutex); 2869 2870 kthread_set_per_cpu(worker->task, -1); 2871 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2872 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2873 else 2874 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2875 } 2876 2877 static void wake_dying_workers(struct list_head *cull_list) 2878 { 2879 struct worker *worker, *tmp; 2880 2881 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2882 list_del_init(&worker->entry); 2883 unbind_worker(worker); 2884 /* 2885 * If the worker was somehow already running, then it had to be 2886 * in pool->idle_list when set_worker_dying() happened or we 2887 * wouldn't have gotten here. 2888 * 2889 * Thus, the worker must either have observed the WORKER_DIE 2890 * flag, or have set its state to TASK_IDLE. Either way, the 2891 * below will be observed by the worker and is safe to do 2892 * outside of pool->lock. 2893 */ 2894 wake_up_process(worker->task); 2895 } 2896 } 2897 2898 /** 2899 * set_worker_dying - Tag a worker for destruction 2900 * @worker: worker to be destroyed 2901 * @list: transfer worker away from its pool->idle_list and into list 2902 * 2903 * Tag @worker for destruction and adjust @pool stats accordingly. 
The worker 2903 * should be idle. 2904 * 2905 * CONTEXT: 2906 * raw_spin_lock_irq(pool->lock). 2907 */ 2908 2909 static void set_worker_dying(struct worker *worker, struct list_head *list) 2910 { 2911 struct worker_pool *pool = worker->pool; 2912 2913 lockdep_assert_held(&pool->lock); 2914 lockdep_assert_held(&wq_pool_attach_mutex); 2915 2916 /* sanity check frenzy */ 2917 if (WARN_ON(worker->current_work) || 2918 WARN_ON(!list_empty(&worker->scheduled)) || 2919 WARN_ON(!(worker->flags & WORKER_IDLE))) 2920 return; 2921 2922 pool->nr_workers--; 2923 pool->nr_idle--; 2924 2925 worker->flags |= WORKER_DIE; 2926 2927 list_move(&worker->entry, list); 2928 list_move(&worker->node, &pool->dying_workers); 2929 } 2930 2931 /** 2932 * idle_worker_timeout - check if some idle workers can now be deleted. 2933 * @t: The pool's idle_timer that just expired 2934 * 2935 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2936 * worker_leave_idle(), as a worker flicking between idle and active while its 2937 * pool is at the too_many_workers() tipping point would cause too much timer 2938 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2939 * it expire and re-evaluate things from there. 2940 */ 2941 static void idle_worker_timeout(struct timer_list *t) 2942 { 2943 struct worker_pool *pool = from_timer(pool, t, idle_timer); 2944 bool do_cull = false; 2945 2946 if (work_pending(&pool->idle_cull_work)) 2947 return; 2948 2949 raw_spin_lock_irq(&pool->lock); 2950 2951 if (too_many_workers(pool)) { 2952 struct worker *worker; 2953 unsigned long expires; 2954 2955 /* idle_list is kept in LIFO order, check the last one */ 2956 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2957 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2958 do_cull = !time_before(jiffies, expires); 2959 2960 if (!do_cull) 2961 mod_timer(&pool->idle_timer, expires); 2962 } 2963 raw_spin_unlock_irq(&pool->lock); 2964 2965 if (do_cull) 2966 queue_work(system_unbound_wq, &pool->idle_cull_work); 2967 } 2968 2969 /** 2970 * idle_cull_fn - cull workers that have been idle for too long. 2971 * @work: the pool's work for handling these idle workers 2972 * 2973 * This goes through a pool's idle workers and gets rid of those that have been 2974 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes). 2975 * 2976 * We don't want to disturb isolated CPUs because of a pcpu kworker being 2977 * culled, so this also resets worker affinity. This requires a sleepable 2978 * context, hence the split between timer callback and work item. 2979 */ 2980 static void idle_cull_fn(struct work_struct *work) 2981 { 2982 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); 2983 LIST_HEAD(cull_list); 2984 2985 /* 2986 * Grabbing wq_pool_attach_mutex here ensures an already-running worker 2987 * cannot proceed beyond worker_detach_from_pool() in its self-destruct 2988 * path. This is required as a previously-preempted worker could run after 2989 * set_worker_dying() has happened but before wake_dying_workers() did.
2990 */ 2991 mutex_lock(&wq_pool_attach_mutex); 2992 raw_spin_lock_irq(&pool->lock); 2993 2994 while (too_many_workers(pool)) { 2995 struct worker *worker; 2996 unsigned long expires; 2997 2998 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2999 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 3000 3001 if (time_before(jiffies, expires)) { 3002 mod_timer(&pool->idle_timer, expires); 3003 break; 3004 } 3005 3006 set_worker_dying(worker, &cull_list); 3007 } 3008 3009 raw_spin_unlock_irq(&pool->lock); 3010 wake_dying_workers(&cull_list); 3011 mutex_unlock(&wq_pool_attach_mutex); 3012 } 3013 3014 static void send_mayday(struct work_struct *work) 3015 { 3016 struct pool_workqueue *pwq = get_work_pwq(work); 3017 struct workqueue_struct *wq = pwq->wq; 3018 3019 lockdep_assert_held(&wq_mayday_lock); 3020 3021 if (!wq->rescuer) 3022 return; 3023 3024 /* mayday mayday mayday */ 3025 if (list_empty(&pwq->mayday_node)) { 3026 /* 3027 * If @pwq is for an unbound wq, its base ref may be put at 3028 * any time due to an attribute change. Pin @pwq until the 3029 * rescuer is done with it. 3030 */ 3031 get_pwq(pwq); 3032 list_add_tail(&pwq->mayday_node, &wq->maydays); 3033 wake_up_process(wq->rescuer->task); 3034 pwq->stats[PWQ_STAT_MAYDAY]++; 3035 } 3036 } 3037 3038 static void pool_mayday_timeout(struct timer_list *t) 3039 { 3040 struct worker_pool *pool = from_timer(pool, t, mayday_timer); 3041 struct work_struct *work; 3042 3043 raw_spin_lock_irq(&pool->lock); 3044 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ 3045 3046 if (need_to_create_worker(pool)) { 3047 /* 3048 * We've been trying to create a new worker but 3049 * haven't been successful. We might be hitting an 3050 * allocation deadlock. Send distress signals to 3051 * rescuers. 3052 */ 3053 list_for_each_entry(work, &pool->worklist, entry) 3054 send_mayday(work); 3055 } 3056 3057 raw_spin_unlock(&wq_mayday_lock); 3058 raw_spin_unlock_irq(&pool->lock); 3059 3060 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 3061 } 3062 3063 /** 3064 * maybe_create_worker - create a new worker if necessary 3065 * @pool: pool to create a new worker for 3066 * 3067 * Create a new worker for @pool if necessary. @pool is guaranteed to 3068 * have at least one idle worker on return from this function. If 3069 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 3070 * sent to all rescuers with works scheduled on @pool to resolve 3071 * possible allocation deadlock. 3072 * 3073 * On return, need_to_create_worker() is guaranteed to be %false and 3074 * may_start_working() %true. 3075 * 3076 * LOCKING: 3077 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 3078 * multiple times. Does GFP_KERNEL allocations. Called only from 3079 * manager. 
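 *
 * In outline (a rough sketch of the flow below): arm the mayday timer for
 * MAYDAY_INITIAL_TIMEOUT, then alternate create_worker() attempts with
 * CREATE_COOLDOWN sleeps until a worker is created or none is needed anymore,
 * while pool_mayday_timeout() above keeps re-sending maydays every
 * MAYDAY_INTERVAL.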
3080 */ 3081 static void maybe_create_worker(struct worker_pool *pool) 3082 __releases(&pool->lock) 3083 __acquires(&pool->lock) 3084 { 3085 restart: 3086 raw_spin_unlock_irq(&pool->lock); 3087 3088 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 3089 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 3090 3091 while (true) { 3092 if (create_worker(pool) || !need_to_create_worker(pool)) 3093 break; 3094 3095 schedule_timeout_interruptible(CREATE_COOLDOWN); 3096 3097 if (!need_to_create_worker(pool)) 3098 break; 3099 } 3100 3101 del_timer_sync(&pool->mayday_timer); 3102 raw_spin_lock_irq(&pool->lock); 3103 /* 3104 * This is necessary even after a new worker was just successfully 3105 * created as @pool->lock was dropped and the new worker might have 3106 * already become busy. 3107 */ 3108 if (need_to_create_worker(pool)) 3109 goto restart; 3110 } 3111 3112 /** 3113 * manage_workers - manage worker pool 3114 * @worker: self 3115 * 3116 * Assume the manager role and manage the worker pool @worker belongs 3117 * to. At any given time, there can be only zero or one manager per 3118 * pool. The exclusion is handled automatically by this function. 3119 * 3120 * The caller can safely start processing works on false return. On 3121 * true return, it's guaranteed that need_to_create_worker() is false 3122 * and may_start_working() is true. 3123 * 3124 * CONTEXT: 3125 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 3126 * multiple times. Does GFP_KERNEL allocations. 3127 * 3128 * Return: 3129 * %false if the pool doesn't need management and the caller can safely 3130 * start processing works, %true if management function was performed and 3131 * the conditions that the caller verified before calling the function may 3132 * no longer be true. 3133 */ 3134 static bool manage_workers(struct worker *worker) 3135 { 3136 struct worker_pool *pool = worker->pool; 3137 3138 if (pool->flags & POOL_MANAGER_ACTIVE) 3139 return false; 3140 3141 pool->flags |= POOL_MANAGER_ACTIVE; 3142 pool->manager = worker; 3143 3144 maybe_create_worker(pool); 3145 3146 pool->manager = NULL; 3147 pool->flags &= ~POOL_MANAGER_ACTIVE; 3148 rcuwait_wake_up(&manager_wait); 3149 return true; 3150 } 3151 3152 /** 3153 * process_one_work - process single work 3154 * @worker: self 3155 * @work: work to process 3156 * 3157 * Process @work. This function contains all the logics necessary to 3158 * process a single work including synchronization against and 3159 * interaction with other workers on the same cpu, queueing and 3160 * flushing. As long as context requirement is met, any worker can 3161 * call this function to process a work. 3162 * 3163 * CONTEXT: 3164 * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 3165 */ 3166 static void process_one_work(struct worker *worker, struct work_struct *work) 3167 __releases(&pool->lock) 3168 __acquires(&pool->lock) 3169 { 3170 struct pool_workqueue *pwq = get_work_pwq(work); 3171 struct worker_pool *pool = worker->pool; 3172 unsigned long work_data; 3173 int lockdep_start_depth, rcu_start_depth; 3174 bool bh_draining = pool->flags & POOL_BH_DRAINING; 3175 #ifdef CONFIG_LOCKDEP 3176 /* 3177 * It is permissible to free the struct work_struct from 3178 * inside the function that is called from it, this we need to 3179 * take into account for lockdep too. To avoid bogus "held 3180 * lock freed" warnings as well as problems when looking into 3181 * work->lockdep_map, make a copy and use that here. 
3182 */ 3183 struct lockdep_map lockdep_map; 3184 3185 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 3186 #endif 3187 /* ensure we're on the correct CPU */ 3188 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 3189 raw_smp_processor_id() != pool->cpu); 3190 3191 /* claim and dequeue */ 3192 debug_work_deactivate(work); 3193 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 3194 worker->current_work = work; 3195 worker->current_func = work->func; 3196 worker->current_pwq = pwq; 3197 if (worker->task) 3198 worker->current_at = worker->task->se.sum_exec_runtime; 3199 work_data = *work_data_bits(work); 3200 worker->current_color = get_work_color(work_data); 3201 3202 /* 3203 * Record wq name for cmdline and debug reporting, may get 3204 * overridden through set_worker_desc(). 3205 */ 3206 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 3207 3208 list_del_init(&work->entry); 3209 3210 /* 3211 * CPU intensive works don't participate in concurrency management. 3212 * They're the scheduler's responsibility. This takes @worker out 3213 * of concurrency management and the next code block will chain 3214 * execution of the pending work items. 3215 */ 3216 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 3217 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 3218 3219 /* 3220 * Kick @pool if necessary. It's always noop for per-cpu worker pools 3221 * since nr_running would always be >= 1 at this point. This is used to 3222 * chain execution of the pending work items for WORKER_NOT_RUNNING 3223 * workers such as the UNBOUND and CPU_INTENSIVE ones. 3224 */ 3225 kick_pool(pool); 3226 3227 /* 3228 * Record the last pool and clear PENDING which should be the last 3229 * update to @work. Also, do this inside @pool->lock so that 3230 * PENDING and queued state changes happen together while IRQ is 3231 * disabled. 3232 */ 3233 set_work_pool_and_clear_pending(work, pool->id, 0); 3234 3235 pwq->stats[PWQ_STAT_STARTED]++; 3236 raw_spin_unlock_irq(&pool->lock); 3237 3238 rcu_start_depth = rcu_preempt_depth(); 3239 lockdep_start_depth = lockdep_depth(current); 3240 /* see drain_dead_softirq_workfn() */ 3241 if (!bh_draining) 3242 lock_map_acquire(&pwq->wq->lockdep_map); 3243 lock_map_acquire(&lockdep_map); 3244 /* 3245 * Strictly speaking we should mark the invariant state without holding 3246 * any locks, that is, before these two lock_map_acquire()'s. 3247 * 3248 * However, that would result in: 3249 * 3250 * A(W1) 3251 * WFC(C) 3252 * A(W1) 3253 * C(C) 3254 * 3255 * Which would create W1->C->W1 dependencies, even though there is no 3256 * actual deadlock possible. There are two solutions, using a 3257 * read-recursive acquire on the work(queue) 'locks', but this will then 3258 * hit the lockdep limitation on recursive locks, or simply discard 3259 * these locks. 3260 * 3261 * AFAICT there is no possible deadlock scenario between the 3262 * flush_work() and complete() primitives (except for single-threaded 3263 * workqueues), so hiding them isn't a problem. 3264 */ 3265 lockdep_invariant_state(true); 3266 trace_workqueue_execute_start(work); 3267 worker->current_func(work); 3268 /* 3269 * While we must be careful to not use "work" after this, the trace 3270 * point will only record its address. 
3271 */ 3272 trace_workqueue_execute_end(work, worker->current_func); 3273 pwq->stats[PWQ_STAT_COMPLETED]++; 3274 lock_map_release(&lockdep_map); 3275 if (!bh_draining) 3276 lock_map_release(&pwq->wq->lockdep_map); 3277 3278 if (unlikely((worker->task && in_atomic()) || 3279 lockdep_depth(current) != lockdep_start_depth || 3280 rcu_preempt_depth() != rcu_start_depth)) { 3281 pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n" 3282 " preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n", 3283 current->comm, task_pid_nr(current), preempt_count(), 3284 lockdep_start_depth, lockdep_depth(current), 3285 rcu_start_depth, rcu_preempt_depth(), 3286 worker->current_func); 3287 debug_show_held_locks(current); 3288 dump_stack(); 3289 } 3290 3291 /* 3292 * The following prevents a kworker from hogging CPU on !PREEMPTION 3293 * kernels, where a requeueing work item waiting for something to 3294 * happen could deadlock with stop_machine as such work item could 3295 * indefinitely requeue itself while all other CPUs are trapped in 3296 * stop_machine. At the same time, report a quiescent RCU state so 3297 * the same condition doesn't freeze RCU. 3298 */ 3299 if (worker->task) 3300 cond_resched(); 3301 3302 raw_spin_lock_irq(&pool->lock); 3303 3304 /* 3305 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 3306 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 3307 * wq_cpu_intensive_thresh_us. Clear it. 3308 */ 3309 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 3310 3311 /* tag the worker for identification in schedule() */ 3312 worker->last_func = worker->current_func; 3313 3314 /* we're done with it, release */ 3315 hash_del(&worker->hentry); 3316 worker->current_work = NULL; 3317 worker->current_func = NULL; 3318 worker->current_pwq = NULL; 3319 worker->current_color = INT_MAX; 3320 3321 /* must be the last step, see the function comment */ 3322 pwq_dec_nr_in_flight(pwq, work_data); 3323 } 3324 3325 /** 3326 * process_scheduled_works - process scheduled works 3327 * @worker: self 3328 * 3329 * Process all scheduled works. Please note that the scheduled list 3330 * may change while processing a work, so this function repeatedly 3331 * fetches a work from the top and executes it. 3332 * 3333 * CONTEXT: 3334 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 3335 * multiple times. 3336 */ 3337 static void process_scheduled_works(struct worker *worker) 3338 { 3339 struct work_struct *work; 3340 bool first = true; 3341 3342 while ((work = list_first_entry_or_null(&worker->scheduled, 3343 struct work_struct, entry))) { 3344 if (first) { 3345 worker->pool->watchdog_ts = jiffies; 3346 first = false; 3347 } 3348 process_one_work(worker, work); 3349 } 3350 } 3351 3352 static void set_pf_worker(bool val) 3353 { 3354 mutex_lock(&wq_pool_attach_mutex); 3355 if (val) 3356 current->flags |= PF_WQ_WORKER; 3357 else 3358 current->flags &= ~PF_WQ_WORKER; 3359 mutex_unlock(&wq_pool_attach_mutex); 3360 } 3361 3362 /** 3363 * worker_thread - the worker thread function 3364 * @__worker: self 3365 * 3366 * The worker thread function. All workers belong to a worker_pool - 3367 * either a per-cpu one or dynamic unbound one. These workers process all 3368 * work items regardless of their specific target workqueue. The only 3369 * exception is work items which belong to workqueues with a rescuer which 3370 * will be explained in rescuer_thread(). 
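 *
 * In outline (a rough sketch, not a literal excerpt), the loop below is:
 *
 *	woke_up:
 *		if (WORKER_DIE)
 *			detach from the pool and exit;
 *		worker_leave_idle();
 *		while (need_more_worker() && work is available)
 *			assign_work() + process_scheduled_works();
 *		worker_enter_idle();
 *		schedule();
 *		goto woke_up;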
3371 * 3372 * Return: 0 3373 */ 3374 static int worker_thread(void *__worker) 3375 { 3376 struct worker *worker = __worker; 3377 struct worker_pool *pool = worker->pool; 3378 3379 /* tell the scheduler that this is a workqueue worker */ 3380 set_pf_worker(true); 3381 woke_up: 3382 raw_spin_lock_irq(&pool->lock); 3383 3384 /* am I supposed to die? */ 3385 if (unlikely(worker->flags & WORKER_DIE)) { 3386 raw_spin_unlock_irq(&pool->lock); 3387 set_pf_worker(false); 3388 3389 set_task_comm(worker->task, "kworker/dying"); 3390 ida_free(&pool->worker_ida, worker->id); 3391 worker_detach_from_pool(worker); 3392 WARN_ON_ONCE(!list_empty(&worker->entry)); 3393 kfree(worker); 3394 return 0; 3395 } 3396 3397 worker_leave_idle(worker); 3398 recheck: 3399 /* no more worker necessary? */ 3400 if (!need_more_worker(pool)) 3401 goto sleep; 3402 3403 /* do we need to manage? */ 3404 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 3405 goto recheck; 3406 3407 /* 3408 * ->scheduled list can only be filled while a worker is 3409 * preparing to process a work or actually processing it. 3410 * Make sure nobody diddled with it while I was sleeping. 3411 */ 3412 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 3413 3414 /* 3415 * Finish PREP stage. We're guaranteed to have at least one idle 3416 * worker or that someone else has already assumed the manager 3417 * role. This is where @worker starts participating in concurrency 3418 * management if applicable and concurrency management is restored 3419 * after being rebound. See rebind_workers() for details. 3420 */ 3421 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 3422 3423 do { 3424 struct work_struct *work = 3425 list_first_entry(&pool->worklist, 3426 struct work_struct, entry); 3427 3428 if (assign_work(work, worker, NULL)) 3429 process_scheduled_works(worker); 3430 } while (keep_working(pool)); 3431 3432 worker_set_flags(worker, WORKER_PREP); 3433 sleep: 3434 /* 3435 * pool->lock is held and there's no work to process and no need to 3436 * manage, sleep. Workers are woken up only while holding 3437 * pool->lock or from local cpu, so setting the current state 3438 * before releasing pool->lock is enough to prevent losing any 3439 * event. 3440 */ 3441 worker_enter_idle(worker); 3442 __set_current_state(TASK_IDLE); 3443 raw_spin_unlock_irq(&pool->lock); 3444 schedule(); 3445 goto woke_up; 3446 } 3447 3448 /** 3449 * rescuer_thread - the rescuer thread function 3450 * @__rescuer: self 3451 * 3452 * Workqueue rescuer thread function. There's one rescuer for each 3453 * workqueue which has WQ_MEM_RECLAIM set. 3454 * 3455 * Regular work processing on a pool may block trying to create a new 3456 * worker which uses GFP_KERNEL allocation which has slight chance of 3457 * developing into deadlock if some works currently on the same queue 3458 * need to be processed to satisfy the GFP_KERNEL allocation. This is 3459 * the problem rescuer solves. 3460 * 3461 * When such condition is possible, the pool summons rescuers of all 3462 * workqueues which have works queued on the pool and let them process 3463 * those works so that forward progress can be guaranteed. 3464 * 3465 * This should happen rarely. 3466 * 3467 * Return: 0 3468 */ 3469 static int rescuer_thread(void *__rescuer) 3470 { 3471 struct worker *rescuer = __rescuer; 3472 struct workqueue_struct *wq = rescuer->rescue_wq; 3473 bool should_stop; 3474 3475 set_user_nice(current, RESCUER_NICE_LEVEL); 3476 3477 /* 3478 * Mark rescuer as worker too. 
As WORKER_PREP is never cleared, it 3479 * doesn't participate in concurrency management. 3480 */ 3481 set_pf_worker(true); 3482 repeat: 3483 set_current_state(TASK_IDLE); 3484 3485 /* 3486 * By the time the rescuer is requested to stop, the workqueue 3487 * shouldn't have any work pending, but @wq->maydays may still have 3488 * pwq(s) queued. This can happen by non-rescuer workers consuming 3489 * all the work items before the rescuer got to them. Go through 3490 * @wq->maydays processing before acting on should_stop so that the 3491 * list is always empty on exit. 3492 */ 3493 should_stop = kthread_should_stop(); 3494 3495 /* see whether any pwq is asking for help */ 3496 raw_spin_lock_irq(&wq_mayday_lock); 3497 3498 while (!list_empty(&wq->maydays)) { 3499 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 3500 struct pool_workqueue, mayday_node); 3501 struct worker_pool *pool = pwq->pool; 3502 struct work_struct *work, *n; 3503 3504 __set_current_state(TASK_RUNNING); 3505 list_del_init(&pwq->mayday_node); 3506 3507 raw_spin_unlock_irq(&wq_mayday_lock); 3508 3509 worker_attach_to_pool(rescuer, pool); 3510 3511 raw_spin_lock_irq(&pool->lock); 3512 3513 /* 3514 * Slurp in all works issued via this workqueue and 3515 * process'em. 3516 */ 3517 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 3518 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 3519 if (get_work_pwq(work) == pwq && 3520 assign_work(work, rescuer, &n)) 3521 pwq->stats[PWQ_STAT_RESCUED]++; 3522 } 3523 3524 if (!list_empty(&rescuer->scheduled)) { 3525 process_scheduled_works(rescuer); 3526 3527 /* 3528 * The above execution of rescued work items could 3529 * have created more to rescue through 3530 * pwq_activate_first_inactive() or chained 3531 * queueing. Let's put @pwq back on mayday list so 3532 * that such back-to-back work items, which may be 3533 * being used to relieve memory pressure, don't 3534 * incur MAYDAY_INTERVAL delay inbetween. 3535 */ 3536 if (pwq->nr_active && need_to_create_worker(pool)) { 3537 raw_spin_lock(&wq_mayday_lock); 3538 /* 3539 * Queue iff we aren't racing destruction 3540 * and somebody else hasn't queued it already. 3541 */ 3542 if (wq->rescuer && list_empty(&pwq->mayday_node)) { 3543 get_pwq(pwq); 3544 list_add_tail(&pwq->mayday_node, &wq->maydays); 3545 } 3546 raw_spin_unlock(&wq_mayday_lock); 3547 } 3548 } 3549 3550 /* 3551 * Put the reference grabbed by send_mayday(). @pool won't 3552 * go away while we're still attached to it. 3553 */ 3554 put_pwq(pwq); 3555 3556 /* 3557 * Leave this pool. Notify regular workers; otherwise, we end up 3558 * with 0 concurrency and stalling the execution. 3559 */ 3560 kick_pool(pool); 3561 3562 raw_spin_unlock_irq(&pool->lock); 3563 3564 worker_detach_from_pool(rescuer); 3565 3566 raw_spin_lock_irq(&wq_mayday_lock); 3567 } 3568 3569 raw_spin_unlock_irq(&wq_mayday_lock); 3570 3571 if (should_stop) { 3572 __set_current_state(TASK_RUNNING); 3573 set_pf_worker(false); 3574 return 0; 3575 } 3576 3577 /* rescuers should never participate in concurrency management */ 3578 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 3579 schedule(); 3580 goto repeat; 3581 } 3582 3583 static void bh_worker(struct worker *worker) 3584 { 3585 struct worker_pool *pool = worker->pool; 3586 int nr_restarts = BH_WORKER_RESTARTS; 3587 unsigned long end = jiffies + BH_WORKER_JIFFIES; 3588 3589 raw_spin_lock_irq(&pool->lock); 3590 worker_leave_idle(worker); 3591 3592 /* 3593 * This function follows the structure of worker_thread(). 
See there for 3594 * explanations on each step. 3595 */ 3596 if (!need_more_worker(pool)) 3597 goto done; 3598 3599 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 3600 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 3601 3602 do { 3603 struct work_struct *work = 3604 list_first_entry(&pool->worklist, 3605 struct work_struct, entry); 3606 3607 if (assign_work(work, worker, NULL)) 3608 process_scheduled_works(worker); 3609 } while (keep_working(pool) && 3610 --nr_restarts && time_before(jiffies, end)); 3611 3612 worker_set_flags(worker, WORKER_PREP); 3613 done: 3614 worker_enter_idle(worker); 3615 kick_pool(pool); 3616 raw_spin_unlock_irq(&pool->lock); 3617 } 3618 3619 /* 3620 * TODO: Convert all tasklet users to workqueue and use softirq directly. 3621 * 3622 * This is currently called from tasklet[_hi]action() and thus is also called 3623 * whenever there are tasklets to run. Let's do an early exit if there's nothing 3624 * queued. Once conversion from tasklet is complete, the need_more_worker() test 3625 * can be dropped. 3626 * 3627 * After full conversion, we'll add worker->softirq_action, directly use the 3628 * softirq action and obtain the worker pointer from the softirq_action pointer. 3629 */ 3630 void workqueue_softirq_action(bool highpri) 3631 { 3632 struct worker_pool *pool = 3633 &per_cpu(bh_worker_pools, smp_processor_id())[highpri]; 3634 if (need_more_worker(pool)) 3635 bh_worker(list_first_entry(&pool->workers, struct worker, node)); 3636 } 3637 3638 struct wq_drain_dead_softirq_work { 3639 struct work_struct work; 3640 struct worker_pool *pool; 3641 struct completion done; 3642 }; 3643 3644 static void drain_dead_softirq_workfn(struct work_struct *work) 3645 { 3646 struct wq_drain_dead_softirq_work *dead_work = 3647 container_of(work, struct wq_drain_dead_softirq_work, work); 3648 struct worker_pool *pool = dead_work->pool; 3649 bool repeat; 3650 3651 /* 3652 * @pool's CPU is dead and we want to execute its still pending work 3653 * items from this BH work item which is running on a different CPU. As 3654 * its CPU is dead, @pool can't be kicked and, as work execution path 3655 * will be nested, a lockdep annotation needs to be suppressed. Mark 3656 * @pool with %POOL_BH_DRAINING for the special treatments. 3657 */ 3658 raw_spin_lock_irq(&pool->lock); 3659 pool->flags |= POOL_BH_DRAINING; 3660 raw_spin_unlock_irq(&pool->lock); 3661 3662 bh_worker(list_first_entry(&pool->workers, struct worker, node)); 3663 3664 raw_spin_lock_irq(&pool->lock); 3665 pool->flags &= ~POOL_BH_DRAINING; 3666 repeat = need_more_worker(pool); 3667 raw_spin_unlock_irq(&pool->lock); 3668 3669 /* 3670 * bh_worker() might hit consecutive execution limit and bail. If there 3671 * still are pending work items, reschedule self and return so that we 3672 * don't hog this CPU's BH. 3673 */ 3674 if (repeat) { 3675 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) 3676 queue_work(system_bh_highpri_wq, work); 3677 else 3678 queue_work(system_bh_wq, work); 3679 } else { 3680 complete(&dead_work->done); 3681 } 3682 } 3683 3684 /* 3685 * @cpu is dead. Drain the remaining BH work items on the current CPU. It's 3686 * possible to allocate dead_work per CPU and avoid flushing. However, then we 3687 * have to worry about draining overlapping with CPU coming back online or 3688 * nesting (one CPU's dead_work queued on another CPU which is also dead and so 3689 * on). Let's keep it simple and drain them synchronously. These are BH work 3690 * items which shouldn't be requeued on the same pool. Shouldn't take long. 
3691 */ 3692 void workqueue_softirq_dead(unsigned int cpu) 3693 { 3694 int i; 3695 3696 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 3697 struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i]; 3698 struct wq_drain_dead_softirq_work dead_work; 3699 3700 if (!need_more_worker(pool)) 3701 continue; 3702 3703 INIT_WORK(&dead_work.work, drain_dead_softirq_workfn); 3704 dead_work.pool = pool; 3705 init_completion(&dead_work.done); 3706 3707 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) 3708 queue_work(system_bh_highpri_wq, &dead_work.work); 3709 else 3710 queue_work(system_bh_wq, &dead_work.work); 3711 3712 wait_for_completion(&dead_work.done); 3713 } 3714 } 3715 3716 /** 3717 * check_flush_dependency - check for flush dependency sanity 3718 * @target_wq: workqueue being flushed 3719 * @target_work: work item being flushed (NULL for workqueue flushes) 3720 * 3721 * %current is trying to flush the whole @target_wq or @target_work on it. 3722 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 3723 * reclaiming memory or running on a workqueue which doesn't have 3724 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 3725 * a deadlock. 3726 */ 3727 static void check_flush_dependency(struct workqueue_struct *target_wq, 3728 struct work_struct *target_work) 3729 { 3730 work_func_t target_func = target_work ? target_work->func : NULL; 3731 struct worker *worker; 3732 3733 if (target_wq->flags & WQ_MEM_RECLAIM) 3734 return; 3735 3736 worker = current_wq_worker(); 3737 3738 WARN_ONCE(current->flags & PF_MEMALLOC, 3739 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 3740 current->pid, current->comm, target_wq->name, target_func); 3741 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 3742 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 3743 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 3744 worker->current_pwq->wq->name, worker->current_func, 3745 target_wq->name, target_func); 3746 } 3747 3748 struct wq_barrier { 3749 struct work_struct work; 3750 struct completion done; 3751 struct task_struct *task; /* purely informational */ 3752 }; 3753 3754 static void wq_barrier_func(struct work_struct *work) 3755 { 3756 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 3757 complete(&barr->done); 3758 } 3759 3760 /** 3761 * insert_wq_barrier - insert a barrier work 3762 * @pwq: pwq to insert barrier into 3763 * @barr: wq_barrier to insert 3764 * @target: target work to attach @barr to 3765 * @worker: worker currently executing @target, NULL if @target is not executing 3766 * 3767 * @barr is linked to @target such that @barr is completed only after 3768 * @target finishes execution. Please note that the ordering 3769 * guarantee is observed only with respect to @target and on the local 3770 * cpu. 3771 * 3772 * Currently, a queued barrier can't be canceled. This is because 3773 * try_to_grab_pending() can't determine whether the work to be 3774 * grabbed is at the head of the queue and thus can't clear LINKED 3775 * flag of the previous work while there must be a valid next work 3776 * after a work with LINKED flag set. 3777 * 3778 * Note that when @worker is non-NULL, @target may be modified 3779 * underneath us, so we can't reliably determine pwq from @target. 3780 * 3781 * CONTEXT: 3782 * raw_spin_lock_irq(pool->lock). 
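 *
 * As an illustration of the two cases handled below (a sketch, not a
 * guarantee beyond what is stated above): when @target is still queued, the
 * pwq list conceptually changes from
 *
 *	... -> target -> next -> ...
 * to
 *	... -> target (LINKED) -> barr -> next -> ...
 *
 * whereas when @target is already being executed by @worker, @barr is queued
 * on @worker->scheduled and is executed once @target finishes.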
3783 */ 3784 static void insert_wq_barrier(struct pool_workqueue *pwq, 3785 struct wq_barrier *barr, 3786 struct work_struct *target, struct worker *worker) 3787 { 3788 static __maybe_unused struct lock_class_key bh_key, thr_key; 3789 unsigned int work_flags = 0; 3790 unsigned int work_color; 3791 struct list_head *head; 3792 3793 /* 3794 * debugobject calls are safe here even with pool->lock locked 3795 * as we know for sure that this will not trigger any of the 3796 * checks and call back into the fixup functions where we 3797 * might deadlock. 3798 * 3799 * BH and threaded workqueues need separate lockdep keys to avoid 3800 * spuriously triggering "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} 3801 * usage". 3802 */ 3803 INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func, 3804 (pwq->wq->flags & WQ_BH) ? &bh_key : &thr_key); 3805 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3806 3807 init_completion_map(&barr->done, &target->lockdep_map); 3808 3809 barr->task = current; 3810 3811 /* The barrier work item does not participate in nr_active. */ 3812 work_flags |= WORK_STRUCT_INACTIVE; 3813 3814 /* 3815 * If @target is currently being executed, schedule the 3816 * barrier to the worker; otherwise, put it after @target. 3817 */ 3818 if (worker) { 3819 head = worker->scheduled.next; 3820 work_color = worker->current_color; 3821 } else { 3822 unsigned long *bits = work_data_bits(target); 3823 3824 head = target->entry.next; 3825 /* there can already be other linked works, inherit and set */ 3826 work_flags |= *bits & WORK_STRUCT_LINKED; 3827 work_color = get_work_color(*bits); 3828 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3829 } 3830 3831 pwq->nr_in_flight[work_color]++; 3832 work_flags |= work_color_to_flags(work_color); 3833 3834 insert_work(pwq, &barr->work, head, work_flags); 3835 } 3836 3837 /** 3838 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3839 * @wq: workqueue being flushed 3840 * @flush_color: new flush color, < 0 for no-op 3841 * @work_color: new work color, < 0 for no-op 3842 * 3843 * Prepare pwqs for workqueue flushing. 3844 * 3845 * If @flush_color is non-negative, flush_color on all pwqs should be 3846 * -1. If no pwq has in-flight commands at the specified color, all 3847 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3848 * has in flight commands, its pwq->flush_color is set to 3849 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3850 * wakeup logic is armed and %true is returned. 3851 * 3852 * The caller should have initialized @wq->first_flusher prior to 3853 * calling this function with non-negative @flush_color. If 3854 * @flush_color is negative, no flush color update is done and %false 3855 * is returned. 3856 * 3857 * If @work_color is non-negative, all pwqs should have the same 3858 * work_color which is previous to @work_color and all will be 3859 * advanced to @work_color. 3860 * 3861 * CONTEXT: 3862 * mutex_lock(wq->mutex). 3863 * 3864 * Return: 3865 * %true if @flush_color >= 0 and there's something to flush. %false 3866 * otherwise. 
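 *
 * Purely illustrative example: starting flush_color 2 on a workqueue with
 * three pwqs where only two of them have nr_in_flight[2] work items, those
 * two get pwq->flush_color set to 2, @wq->nr_pwqs_to_flush ends up at 2,
 * %true is returned and the third pwq's flush_color stays -1.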
3867 */ 3868 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3869 int flush_color, int work_color) 3870 { 3871 bool wait = false; 3872 struct pool_workqueue *pwq; 3873 3874 if (flush_color >= 0) { 3875 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3876 atomic_set(&wq->nr_pwqs_to_flush, 1); 3877 } 3878 3879 for_each_pwq(pwq, wq) { 3880 struct worker_pool *pool = pwq->pool; 3881 3882 raw_spin_lock_irq(&pool->lock); 3883 3884 if (flush_color >= 0) { 3885 WARN_ON_ONCE(pwq->flush_color != -1); 3886 3887 if (pwq->nr_in_flight[flush_color]) { 3888 pwq->flush_color = flush_color; 3889 atomic_inc(&wq->nr_pwqs_to_flush); 3890 wait = true; 3891 } 3892 } 3893 3894 if (work_color >= 0) { 3895 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3896 pwq->work_color = work_color; 3897 } 3898 3899 raw_spin_unlock_irq(&pool->lock); 3900 } 3901 3902 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3903 complete(&wq->first_flusher->done); 3904 3905 return wait; 3906 } 3907 3908 static void touch_wq_lockdep_map(struct workqueue_struct *wq) 3909 { 3910 #ifdef CONFIG_LOCKDEP 3911 if (wq->flags & WQ_BH) 3912 local_bh_disable(); 3913 3914 lock_map_acquire(&wq->lockdep_map); 3915 lock_map_release(&wq->lockdep_map); 3916 3917 if (wq->flags & WQ_BH) 3918 local_bh_enable(); 3919 #endif 3920 } 3921 3922 static void touch_work_lockdep_map(struct work_struct *work, 3923 struct workqueue_struct *wq) 3924 { 3925 #ifdef CONFIG_LOCKDEP 3926 if (wq->flags & WQ_BH) 3927 local_bh_disable(); 3928 3929 lock_map_acquire(&work->lockdep_map); 3930 lock_map_release(&work->lockdep_map); 3931 3932 if (wq->flags & WQ_BH) 3933 local_bh_enable(); 3934 #endif 3935 } 3936 3937 /** 3938 * __flush_workqueue - ensure that any scheduled work has run to completion. 3939 * @wq: workqueue to flush 3940 * 3941 * This function sleeps until all work items which were queued on entry 3942 * have finished execution, but it is not livelocked by new incoming ones. 3943 */ 3944 void __flush_workqueue(struct workqueue_struct *wq) 3945 { 3946 struct wq_flusher this_flusher = { 3947 .list = LIST_HEAD_INIT(this_flusher.list), 3948 .flush_color = -1, 3949 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3950 }; 3951 int next_color; 3952 3953 if (WARN_ON(!wq_online)) 3954 return; 3955 3956 touch_wq_lockdep_map(wq); 3957 3958 mutex_lock(&wq->mutex); 3959 3960 /* 3961 * Start-to-wait phase 3962 */ 3963 next_color = work_next_color(wq->work_color); 3964 3965 if (next_color != wq->flush_color) { 3966 /* 3967 * Color space is not full. The current work_color 3968 * becomes our flush_color and work_color is advanced 3969 * by one. 3970 */ 3971 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3972 this_flusher.flush_color = wq->work_color; 3973 wq->work_color = next_color; 3974 3975 if (!wq->first_flusher) { 3976 /* no flush in progress, become the first flusher */ 3977 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3978 3979 wq->first_flusher = &this_flusher; 3980 3981 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3982 wq->work_color)) { 3983 /* nothing to flush, done */ 3984 wq->flush_color = next_color; 3985 wq->first_flusher = NULL; 3986 goto out_unlock; 3987 } 3988 } else { 3989 /* wait in queue */ 3990 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3991 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3992 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3993 } 3994 } else { 3995 /* 3996 * Oops, color space is full, wait on overflow queue. 
3997 * The next flush completion will assign us 3998 * flush_color and transfer to flusher_queue. 3999 */ 4000 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 4001 } 4002 4003 check_flush_dependency(wq, NULL); 4004 4005 mutex_unlock(&wq->mutex); 4006 4007 wait_for_completion(&this_flusher.done); 4008 4009 /* 4010 * Wake-up-and-cascade phase 4011 * 4012 * First flushers are responsible for cascading flushes and 4013 * handling overflow. Non-first flushers can simply return. 4014 */ 4015 if (READ_ONCE(wq->first_flusher) != &this_flusher) 4016 return; 4017 4018 mutex_lock(&wq->mutex); 4019 4020 /* we might have raced, check again with mutex held */ 4021 if (wq->first_flusher != &this_flusher) 4022 goto out_unlock; 4023 4024 WRITE_ONCE(wq->first_flusher, NULL); 4025 4026 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 4027 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 4028 4029 while (true) { 4030 struct wq_flusher *next, *tmp; 4031 4032 /* complete all the flushers sharing the current flush color */ 4033 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 4034 if (next->flush_color != wq->flush_color) 4035 break; 4036 list_del_init(&next->list); 4037 complete(&next->done); 4038 } 4039 4040 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 4041 wq->flush_color != work_next_color(wq->work_color)); 4042 4043 /* this flush_color is finished, advance by one */ 4044 wq->flush_color = work_next_color(wq->flush_color); 4045 4046 /* one color has been freed, handle overflow queue */ 4047 if (!list_empty(&wq->flusher_overflow)) { 4048 /* 4049 * Assign the same color to all overflowed 4050 * flushers, advance work_color and append to 4051 * flusher_queue. This is the start-to-wait 4052 * phase for these overflowed flushers. 4053 */ 4054 list_for_each_entry(tmp, &wq->flusher_overflow, list) 4055 tmp->flush_color = wq->work_color; 4056 4057 wq->work_color = work_next_color(wq->work_color); 4058 4059 list_splice_tail_init(&wq->flusher_overflow, 4060 &wq->flusher_queue); 4061 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 4062 } 4063 4064 if (list_empty(&wq->flusher_queue)) { 4065 WARN_ON_ONCE(wq->flush_color != wq->work_color); 4066 break; 4067 } 4068 4069 /* 4070 * Need to flush more colors. Make the next flusher 4071 * the new first flusher and arm pwqs. 4072 */ 4073 WARN_ON_ONCE(wq->flush_color == wq->work_color); 4074 WARN_ON_ONCE(wq->flush_color != next->flush_color); 4075 4076 list_del_init(&next->list); 4077 wq->first_flusher = next; 4078 4079 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 4080 break; 4081 4082 /* 4083 * Meh... this color is already done, clear first 4084 * flusher and repeat cascading. 4085 */ 4086 wq->first_flusher = NULL; 4087 } 4088 4089 out_unlock: 4090 mutex_unlock(&wq->mutex); 4091 } 4092 EXPORT_SYMBOL(__flush_workqueue); 4093 4094 /** 4095 * drain_workqueue - drain a workqueue 4096 * @wq: workqueue to drain 4097 * 4098 * Wait until the workqueue becomes empty. While draining is in progress, 4099 * only chain queueing is allowed. IOW, only currently pending or running 4100 * work items on @wq can queue further work items on it. @wq is flushed 4101 * repeatedly until it becomes empty. The number of flushing is determined 4102 * by the depth of chaining and should be relatively short. Whine if it 4103 * takes too long. 
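 *
 * A hypothetical sketch of the chain queueing that stays allowed while
 * draining (foo_wq, foo_next_work and foo_more_to_do() are made-up names):
 *
 *	static void foo_step_fn(struct work_struct *work)
 *	{
 *		if (foo_more_to_do())
 *			queue_work(foo_wq, &foo_next_work);
 *	}
 *
 * Queueing from outside such chains while @wq is draining is rejected with
 * a warning in __queue_work().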
4104 */ 4105 void drain_workqueue(struct workqueue_struct *wq) 4106 { 4107 unsigned int flush_cnt = 0; 4108 struct pool_workqueue *pwq; 4109 4110 /* 4111 * __queue_work() needs to test whether there are drainers, is much 4112 * hotter than drain_workqueue() and already looks at @wq->flags. 4113 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 4114 */ 4115 mutex_lock(&wq->mutex); 4116 if (!wq->nr_drainers++) 4117 wq->flags |= __WQ_DRAINING; 4118 mutex_unlock(&wq->mutex); 4119 reflush: 4120 __flush_workqueue(wq); 4121 4122 mutex_lock(&wq->mutex); 4123 4124 for_each_pwq(pwq, wq) { 4125 bool drained; 4126 4127 raw_spin_lock_irq(&pwq->pool->lock); 4128 drained = pwq_is_empty(pwq); 4129 raw_spin_unlock_irq(&pwq->pool->lock); 4130 4131 if (drained) 4132 continue; 4133 4134 if (++flush_cnt == 10 || 4135 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 4136 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 4137 wq->name, __func__, flush_cnt); 4138 4139 mutex_unlock(&wq->mutex); 4140 goto reflush; 4141 } 4142 4143 if (!--wq->nr_drainers) 4144 wq->flags &= ~__WQ_DRAINING; 4145 mutex_unlock(&wq->mutex); 4146 } 4147 EXPORT_SYMBOL_GPL(drain_workqueue); 4148 4149 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 4150 bool from_cancel) 4151 { 4152 struct worker *worker = NULL; 4153 struct worker_pool *pool; 4154 struct pool_workqueue *pwq; 4155 struct workqueue_struct *wq; 4156 4157 might_sleep(); 4158 4159 rcu_read_lock(); 4160 pool = get_work_pool(work); 4161 if (!pool) { 4162 rcu_read_unlock(); 4163 return false; 4164 } 4165 4166 raw_spin_lock_irq(&pool->lock); 4167 /* see the comment in try_to_grab_pending() with the same code */ 4168 pwq = get_work_pwq(work); 4169 if (pwq) { 4170 if (unlikely(pwq->pool != pool)) 4171 goto already_gone; 4172 } else { 4173 worker = find_worker_executing_work(pool, work); 4174 if (!worker) 4175 goto already_gone; 4176 pwq = worker->current_pwq; 4177 } 4178 4179 wq = pwq->wq; 4180 check_flush_dependency(wq, work); 4181 4182 insert_wq_barrier(pwq, barr, work, worker); 4183 raw_spin_unlock_irq(&pool->lock); 4184 4185 touch_work_lockdep_map(work, wq); 4186 4187 /* 4188 * Force a lock recursion deadlock when using flush_work() inside a 4189 * single-threaded or rescuer equipped workqueue. 4190 * 4191 * For single threaded workqueues the deadlock happens when the work 4192 * is after the work issuing the flush_work(). For rescuer equipped 4193 * workqueues the deadlock happens when the rescuer stalls, blocking 4194 * forward progress. 4195 */ 4196 if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer)) 4197 touch_wq_lockdep_map(wq); 4198 4199 rcu_read_unlock(); 4200 return true; 4201 already_gone: 4202 raw_spin_unlock_irq(&pool->lock); 4203 rcu_read_unlock(); 4204 return false; 4205 } 4206 4207 static bool __flush_work(struct work_struct *work, bool from_cancel) 4208 { 4209 struct wq_barrier barr; 4210 4211 if (WARN_ON(!wq_online)) 4212 return false; 4213 4214 if (WARN_ON(!work->func)) 4215 return false; 4216 4217 if (start_flush_work(work, &barr, from_cancel)) { 4218 wait_for_completion(&barr.done); 4219 destroy_work_on_stack(&barr.work); 4220 return true; 4221 } else { 4222 return false; 4223 } 4224 } 4225 4226 /** 4227 * flush_work - wait for a work to finish executing the last queueing instance 4228 * @work: the work to flush 4229 * 4230 * Wait until @work has finished execution. @work is guaranteed to be idle 4231 * on return if it hasn't been requeued since flush started. 
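 *
 * A rough usage sketch (hypothetical @foo and foo_release(), not taken from
 * this file): waiting for a work item to finish before freeing the object
 * that embeds it, assuming nothing can requeue it afterwards:
 *
 *	struct foo {
 *		struct work_struct work;
 *	};
 *
 *	void foo_release(struct foo *foo)
 *	{
 *		flush_work(&foo->work);
 *		kfree(foo);
 *	}
 *
 * If the work item may requeue itself, use cancel_work_sync() instead.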
4232 * 4233 * Return: 4234 * %true if flush_work() waited for the work to finish execution, 4235 * %false if it was already idle. 4236 */ 4237 bool flush_work(struct work_struct *work) 4238 { 4239 return __flush_work(work, false); 4240 } 4241 EXPORT_SYMBOL_GPL(flush_work); 4242 4243 /** 4244 * flush_delayed_work - wait for a dwork to finish executing the last queueing 4245 * @dwork: the delayed work to flush 4246 * 4247 * Delayed timer is cancelled and the pending work is queued for 4248 * immediate execution. Like flush_work(), this function only 4249 * considers the last queueing instance of @dwork. 4250 * 4251 * Return: 4252 * %true if flush_work() waited for the work to finish execution, 4253 * %false if it was already idle. 4254 */ 4255 bool flush_delayed_work(struct delayed_work *dwork) 4256 { 4257 local_irq_disable(); 4258 if (del_timer_sync(&dwork->timer)) 4259 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 4260 local_irq_enable(); 4261 return flush_work(&dwork->work); 4262 } 4263 EXPORT_SYMBOL(flush_delayed_work); 4264 4265 /** 4266 * flush_rcu_work - wait for a rwork to finish executing the last queueing 4267 * @rwork: the rcu work to flush 4268 * 4269 * Return: 4270 * %true if flush_rcu_work() waited for the work to finish execution, 4271 * %false if it was already idle. 4272 */ 4273 bool flush_rcu_work(struct rcu_work *rwork) 4274 { 4275 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 4276 rcu_barrier(); 4277 flush_work(&rwork->work); 4278 return true; 4279 } else { 4280 return flush_work(&rwork->work); 4281 } 4282 } 4283 EXPORT_SYMBOL(flush_rcu_work); 4284 4285 static bool __cancel_work(struct work_struct *work, u32 cflags) 4286 { 4287 unsigned long irq_flags; 4288 int ret; 4289 4290 do { 4291 ret = try_to_grab_pending(work, cflags, &irq_flags); 4292 } while (unlikely(ret == -EAGAIN)); 4293 4294 if (unlikely(ret < 0)) 4295 return false; 4296 4297 set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0); 4298 local_irq_restore(irq_flags); 4299 return ret; 4300 } 4301 4302 static bool __cancel_work_sync(struct work_struct *work, u32 cflags) 4303 { 4304 unsigned long irq_flags; 4305 bool ret; 4306 4307 /* claim @work and tell other tasks trying to grab @work to back off */ 4308 ret = work_grab_pending(work, cflags, &irq_flags); 4309 mark_work_canceling(work); 4310 local_irq_restore(irq_flags); 4311 4312 /* 4313 * Skip __flush_work() during early boot when we know that @work isn't 4314 * executing. This allows canceling during early boot. 4315 */ 4316 if (wq_online) 4317 __flush_work(work, true); 4318 4319 /* 4320 * smp_mb() at the end of set_work_pool_and_clear_pending() is paired 4321 * with prepare_to_wait() above so that either waitqueue_active() is 4322 * visible here or !work_is_canceling() is visible there. 4323 */ 4324 set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0); 4325 4326 if (waitqueue_active(&wq_cancel_waitq)) 4327 __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); 4328 4329 return ret; 4330 } 4331 4332 /* 4333 * See cancel_delayed_work() 4334 */ 4335 bool cancel_work(struct work_struct *work) 4336 { 4337 return __cancel_work(work, 0); 4338 } 4339 EXPORT_SYMBOL(cancel_work); 4340 4341 /** 4342 * cancel_work_sync - cancel a work and wait for it to finish 4343 * @work: the work to cancel 4344 * 4345 * Cancel @work and wait for its execution to finish. This function 4346 * can be used even if the work re-queues itself or migrates to 4347 * another workqueue. 
On return from this function, @work is 4348 * guaranteed to be not pending or executing on any CPU. 4349 * 4350 * cancel_work_sync(&delayed_work->work) must not be used for 4351 * delayed_work's. Use cancel_delayed_work_sync() instead. 4352 * 4353 * The caller must ensure that the workqueue on which @work was last 4354 * queued can't be destroyed before this function returns. 4355 * 4356 * Return: 4357 * %true if @work was pending, %false otherwise. 4358 */ 4359 bool cancel_work_sync(struct work_struct *work) 4360 { 4361 return __cancel_work_sync(work, 0); 4362 } 4363 EXPORT_SYMBOL_GPL(cancel_work_sync); 4364 4365 /** 4366 * cancel_delayed_work - cancel a delayed work 4367 * @dwork: delayed_work to cancel 4368 * 4369 * Kill off a pending delayed_work. 4370 * 4371 * Return: %true if @dwork was pending and canceled; %false if it wasn't 4372 * pending. 4373 * 4374 * Note: 4375 * The work callback function may still be running on return, unless 4376 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 4377 * use cancel_delayed_work_sync() to wait on it. 4378 * 4379 * This function is safe to call from any context including IRQ handler. 4380 */ 4381 bool cancel_delayed_work(struct delayed_work *dwork) 4382 { 4383 return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED); 4384 } 4385 EXPORT_SYMBOL(cancel_delayed_work); 4386 4387 /** 4388 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 4389 * @dwork: the delayed work cancel 4390 * 4391 * This is cancel_work_sync() for delayed works. 4392 * 4393 * Return: 4394 * %true if @dwork was pending, %false otherwise. 4395 */ 4396 bool cancel_delayed_work_sync(struct delayed_work *dwork) 4397 { 4398 return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED); 4399 } 4400 EXPORT_SYMBOL(cancel_delayed_work_sync); 4401 4402 /** 4403 * schedule_on_each_cpu - execute a function synchronously on each online CPU 4404 * @func: the function to call 4405 * 4406 * schedule_on_each_cpu() executes @func on each online CPU using the 4407 * system workqueue and blocks until all CPUs have completed. 4408 * schedule_on_each_cpu() is very slow. 4409 * 4410 * Return: 4411 * 0 on success, -errno on failure. 4412 */ 4413 int schedule_on_each_cpu(work_func_t func) 4414 { 4415 int cpu; 4416 struct work_struct __percpu *works; 4417 4418 works = alloc_percpu(struct work_struct); 4419 if (!works) 4420 return -ENOMEM; 4421 4422 cpus_read_lock(); 4423 4424 for_each_online_cpu(cpu) { 4425 struct work_struct *work = per_cpu_ptr(works, cpu); 4426 4427 INIT_WORK(work, func); 4428 schedule_work_on(cpu, work); 4429 } 4430 4431 for_each_online_cpu(cpu) 4432 flush_work(per_cpu_ptr(works, cpu)); 4433 4434 cpus_read_unlock(); 4435 free_percpu(works); 4436 return 0; 4437 } 4438 4439 /** 4440 * execute_in_process_context - reliably execute the routine with user context 4441 * @fn: the function to execute 4442 * @ew: guaranteed storage for the execute work structure (must 4443 * be available when the work executes) 4444 * 4445 * Executes the function immediately if process context is available, 4446 * otherwise schedules the function for delayed execution. 
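 *
 * A minimal usage sketch (foo_fn and foo_ew are hypothetical; @ew must stay
 * valid until the work runs in case it ends up being scheduled):
 *
 *	static struct execute_work foo_ew;
 *
 *	execute_in_process_context(foo_fn, &foo_ew);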
4447 * 4448 * Return: 0 - function was executed 4449 * 1 - function was scheduled for execution 4450 */ 4451 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 4452 { 4453 if (!in_interrupt()) { 4454 fn(&ew->work); 4455 return 0; 4456 } 4457 4458 INIT_WORK(&ew->work, fn); 4459 schedule_work(&ew->work); 4460 4461 return 1; 4462 } 4463 EXPORT_SYMBOL_GPL(execute_in_process_context); 4464 4465 /** 4466 * free_workqueue_attrs - free a workqueue_attrs 4467 * @attrs: workqueue_attrs to free 4468 * 4469 * Undo alloc_workqueue_attrs(). 4470 */ 4471 void free_workqueue_attrs(struct workqueue_attrs *attrs) 4472 { 4473 if (attrs) { 4474 free_cpumask_var(attrs->cpumask); 4475 free_cpumask_var(attrs->__pod_cpumask); 4476 kfree(attrs); 4477 } 4478 } 4479 4480 /** 4481 * alloc_workqueue_attrs - allocate a workqueue_attrs 4482 * 4483 * Allocate a new workqueue_attrs, initialize with default settings and 4484 * return it. 4485 * 4486 * Return: The allocated new workqueue_attr on success. %NULL on failure. 4487 */ 4488 struct workqueue_attrs *alloc_workqueue_attrs(void) 4489 { 4490 struct workqueue_attrs *attrs; 4491 4492 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 4493 if (!attrs) 4494 goto fail; 4495 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 4496 goto fail; 4497 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 4498 goto fail; 4499 4500 cpumask_copy(attrs->cpumask, cpu_possible_mask); 4501 attrs->affn_scope = WQ_AFFN_DFL; 4502 return attrs; 4503 fail: 4504 free_workqueue_attrs(attrs); 4505 return NULL; 4506 } 4507 4508 static void copy_workqueue_attrs(struct workqueue_attrs *to, 4509 const struct workqueue_attrs *from) 4510 { 4511 to->nice = from->nice; 4512 cpumask_copy(to->cpumask, from->cpumask); 4513 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 4514 to->affn_strict = from->affn_strict; 4515 4516 /* 4517 * Unlike hash and equality test, copying shouldn't ignore wq-only 4518 * fields as copying is used for both pool and wq attrs. Instead, 4519 * get_unbound_pool() explicitly clears the fields. 4520 */ 4521 to->affn_scope = from->affn_scope; 4522 to->ordered = from->ordered; 4523 } 4524 4525 /* 4526 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 4527 * comments in 'struct workqueue_attrs' definition. 
4528 */ 4529 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 4530 { 4531 attrs->affn_scope = WQ_AFFN_NR_TYPES; 4532 attrs->ordered = false; 4533 } 4534 4535 /* hash value of the content of @attr */ 4536 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 4537 { 4538 u32 hash = 0; 4539 4540 hash = jhash_1word(attrs->nice, hash); 4541 hash = jhash(cpumask_bits(attrs->cpumask), 4542 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 4543 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 4544 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 4545 hash = jhash_1word(attrs->affn_strict, hash); 4546 return hash; 4547 } 4548 4549 /* content equality test */ 4550 static bool wqattrs_equal(const struct workqueue_attrs *a, 4551 const struct workqueue_attrs *b) 4552 { 4553 if (a->nice != b->nice) 4554 return false; 4555 if (!cpumask_equal(a->cpumask, b->cpumask)) 4556 return false; 4557 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 4558 return false; 4559 if (a->affn_strict != b->affn_strict) 4560 return false; 4561 return true; 4562 } 4563 4564 /* Update @attrs with actually available CPUs */ 4565 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 4566 const cpumask_t *unbound_cpumask) 4567 { 4568 /* 4569 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 4570 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 4571 * @unbound_cpumask. 4572 */ 4573 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 4574 if (unlikely(cpumask_empty(attrs->cpumask))) 4575 cpumask_copy(attrs->cpumask, unbound_cpumask); 4576 } 4577 4578 /* find wq_pod_type to use for @attrs */ 4579 static const struct wq_pod_type * 4580 wqattrs_pod_type(const struct workqueue_attrs *attrs) 4581 { 4582 enum wq_affn_scope scope; 4583 struct wq_pod_type *pt; 4584 4585 /* to synchronize access to wq_affn_dfl */ 4586 lockdep_assert_held(&wq_pool_mutex); 4587 4588 if (attrs->affn_scope == WQ_AFFN_DFL) 4589 scope = wq_affn_dfl; 4590 else 4591 scope = attrs->affn_scope; 4592 4593 pt = &wq_pod_types[scope]; 4594 4595 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 4596 likely(pt->nr_pods)) 4597 return pt; 4598 4599 /* 4600 * Before workqueue_init_topology(), only SYSTEM is available which is 4601 * initialized in workqueue_init_early(). 4602 */ 4603 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 4604 BUG_ON(!pt->nr_pods); 4605 return pt; 4606 } 4607 4608 /** 4609 * init_worker_pool - initialize a newly zalloc'd worker_pool 4610 * @pool: worker_pool to initialize 4611 * 4612 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 4613 * 4614 * Return: 0 on success, -errno on failure. Even on failure, all fields 4615 * inside @pool proper are initialized and put_unbound_pool() can be called 4616 * on @pool safely to release it. 
4617 */ 4618 static int init_worker_pool(struct worker_pool *pool) 4619 { 4620 raw_spin_lock_init(&pool->lock); 4621 pool->id = -1; 4622 pool->cpu = -1; 4623 pool->node = NUMA_NO_NODE; 4624 pool->flags |= POOL_DISASSOCIATED; 4625 pool->watchdog_ts = jiffies; 4626 INIT_LIST_HEAD(&pool->worklist); 4627 INIT_LIST_HEAD(&pool->idle_list); 4628 hash_init(pool->busy_hash); 4629 4630 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 4631 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 4632 4633 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 4634 4635 INIT_LIST_HEAD(&pool->workers); 4636 INIT_LIST_HEAD(&pool->dying_workers); 4637 4638 ida_init(&pool->worker_ida); 4639 INIT_HLIST_NODE(&pool->hash_node); 4640 pool->refcnt = 1; 4641 4642 /* shouldn't fail above this point */ 4643 pool->attrs = alloc_workqueue_attrs(); 4644 if (!pool->attrs) 4645 return -ENOMEM; 4646 4647 wqattrs_clear_for_pool(pool->attrs); 4648 4649 return 0; 4650 } 4651 4652 #ifdef CONFIG_LOCKDEP 4653 static void wq_init_lockdep(struct workqueue_struct *wq) 4654 { 4655 char *lock_name; 4656 4657 lockdep_register_key(&wq->key); 4658 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 4659 if (!lock_name) 4660 lock_name = wq->name; 4661 4662 wq->lock_name = lock_name; 4663 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 4664 } 4665 4666 static void wq_unregister_lockdep(struct workqueue_struct *wq) 4667 { 4668 lockdep_unregister_key(&wq->key); 4669 } 4670 4671 static void wq_free_lockdep(struct workqueue_struct *wq) 4672 { 4673 if (wq->lock_name != wq->name) 4674 kfree(wq->lock_name); 4675 } 4676 #else 4677 static void wq_init_lockdep(struct workqueue_struct *wq) 4678 { 4679 } 4680 4681 static void wq_unregister_lockdep(struct workqueue_struct *wq) 4682 { 4683 } 4684 4685 static void wq_free_lockdep(struct workqueue_struct *wq) 4686 { 4687 } 4688 #endif 4689 4690 static void free_node_nr_active(struct wq_node_nr_active **nna_ar) 4691 { 4692 int node; 4693 4694 for_each_node(node) { 4695 kfree(nna_ar[node]); 4696 nna_ar[node] = NULL; 4697 } 4698 4699 kfree(nna_ar[nr_node_ids]); 4700 nna_ar[nr_node_ids] = NULL; 4701 } 4702 4703 static void init_node_nr_active(struct wq_node_nr_active *nna) 4704 { 4705 nna->max = WQ_DFL_MIN_ACTIVE; 4706 atomic_set(&nna->nr, 0); 4707 raw_spin_lock_init(&nna->lock); 4708 INIT_LIST_HEAD(&nna->pending_pwqs); 4709 } 4710 4711 /* 4712 * Each node's nr_active counter will be accessed mostly from its own node and 4713 * should be allocated in the node. 
4714 */ 4715 static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar) 4716 { 4717 struct wq_node_nr_active *nna; 4718 int node; 4719 4720 for_each_node(node) { 4721 nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node); 4722 if (!nna) 4723 goto err_free; 4724 init_node_nr_active(nna); 4725 nna_ar[node] = nna; 4726 } 4727 4728 /* [nr_node_ids] is used as the fallback */ 4729 nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE); 4730 if (!nna) 4731 goto err_free; 4732 init_node_nr_active(nna); 4733 nna_ar[nr_node_ids] = nna; 4734 4735 return 0; 4736 4737 err_free: 4738 free_node_nr_active(nna_ar); 4739 return -ENOMEM; 4740 } 4741 4742 static void rcu_free_wq(struct rcu_head *rcu) 4743 { 4744 struct workqueue_struct *wq = 4745 container_of(rcu, struct workqueue_struct, rcu); 4746 4747 if (wq->flags & WQ_UNBOUND) 4748 free_node_nr_active(wq->node_nr_active); 4749 4750 wq_free_lockdep(wq); 4751 free_percpu(wq->cpu_pwq); 4752 free_workqueue_attrs(wq->unbound_attrs); 4753 kfree(wq); 4754 } 4755 4756 static void rcu_free_pool(struct rcu_head *rcu) 4757 { 4758 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 4759 4760 ida_destroy(&pool->worker_ida); 4761 free_workqueue_attrs(pool->attrs); 4762 kfree(pool); 4763 } 4764 4765 /** 4766 * put_unbound_pool - put a worker_pool 4767 * @pool: worker_pool to put 4768 * 4769 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 4770 * safe manner. get_unbound_pool() calls this function on its failure path 4771 * and this function should be able to release pools which went through, 4772 * successfully or not, init_worker_pool(). 4773 * 4774 * Should be called with wq_pool_mutex held. 4775 */ 4776 static void put_unbound_pool(struct worker_pool *pool) 4777 { 4778 DECLARE_COMPLETION_ONSTACK(detach_completion); 4779 struct worker *worker; 4780 LIST_HEAD(cull_list); 4781 4782 lockdep_assert_held(&wq_pool_mutex); 4783 4784 if (--pool->refcnt) 4785 return; 4786 4787 /* sanity checks */ 4788 if (WARN_ON(!(pool->cpu < 0)) || 4789 WARN_ON(!list_empty(&pool->worklist))) 4790 return; 4791 4792 /* release id and unhash */ 4793 if (pool->id >= 0) 4794 idr_remove(&worker_pool_idr, pool->id); 4795 hash_del(&pool->hash_node); 4796 4797 /* 4798 * Become the manager and destroy all workers. This prevents 4799 * @pool's workers from blocking on attach_mutex. We're the last 4800 * manager and @pool gets freed with the flag set. 4801 * 4802 * Having a concurrent manager is quite unlikely to happen as we can 4803 * only get here with 4804 * pwq->refcnt == pool->refcnt == 0 4805 * which implies no work queued to the pool, which implies no worker can 4806 * become the manager. 
However a worker could have taken the role of 4807 * manager before the refcnts dropped to 0, since maybe_create_worker() 4808 * drops pool->lock 4809 */ 4810 while (true) { 4811 rcuwait_wait_event(&manager_wait, 4812 !(pool->flags & POOL_MANAGER_ACTIVE), 4813 TASK_UNINTERRUPTIBLE); 4814 4815 mutex_lock(&wq_pool_attach_mutex); 4816 raw_spin_lock_irq(&pool->lock); 4817 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4818 pool->flags |= POOL_MANAGER_ACTIVE; 4819 break; 4820 } 4821 raw_spin_unlock_irq(&pool->lock); 4822 mutex_unlock(&wq_pool_attach_mutex); 4823 } 4824 4825 while ((worker = first_idle_worker(pool))) 4826 set_worker_dying(worker, &cull_list); 4827 WARN_ON(pool->nr_workers || pool->nr_idle); 4828 raw_spin_unlock_irq(&pool->lock); 4829 4830 wake_dying_workers(&cull_list); 4831 4832 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4833 pool->detach_completion = &detach_completion; 4834 mutex_unlock(&wq_pool_attach_mutex); 4835 4836 if (pool->detach_completion) 4837 wait_for_completion(pool->detach_completion); 4838 4839 /* shut down the timers */ 4840 del_timer_sync(&pool->idle_timer); 4841 cancel_work_sync(&pool->idle_cull_work); 4842 del_timer_sync(&pool->mayday_timer); 4843 4844 /* RCU protected to allow dereferences from get_work_pool() */ 4845 call_rcu(&pool->rcu, rcu_free_pool); 4846 } 4847 4848 /** 4849 * get_unbound_pool - get a worker_pool with the specified attributes 4850 * @attrs: the attributes of the worker_pool to get 4851 * 4852 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4853 * reference count and return it. If there already is a matching 4854 * worker_pool, it will be used; otherwise, this function attempts to 4855 * create a new one. 4856 * 4857 * Should be called with wq_pool_mutex held. 4858 * 4859 * Return: On success, a worker_pool with the same attributes as @attrs. 4860 * On failure, %NULL. 4861 */ 4862 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4863 { 4864 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4865 u32 hash = wqattrs_hash(attrs); 4866 struct worker_pool *pool; 4867 int pod, node = NUMA_NO_NODE; 4868 4869 lockdep_assert_held(&wq_pool_mutex); 4870 4871 /* do we already have a matching pool? 
*/ 4872 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4873 if (wqattrs_equal(pool->attrs, attrs)) { 4874 pool->refcnt++; 4875 return pool; 4876 } 4877 } 4878 4879 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4880 for (pod = 0; pod < pt->nr_pods; pod++) { 4881 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4882 node = pt->pod_node[pod]; 4883 break; 4884 } 4885 } 4886 4887 /* nope, create a new one */ 4888 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4889 if (!pool || init_worker_pool(pool) < 0) 4890 goto fail; 4891 4892 pool->node = node; 4893 copy_workqueue_attrs(pool->attrs, attrs); 4894 wqattrs_clear_for_pool(pool->attrs); 4895 4896 if (worker_pool_assign_id(pool) < 0) 4897 goto fail; 4898 4899 /* create and start the initial worker */ 4900 if (wq_online && !create_worker(pool)) 4901 goto fail; 4902 4903 /* install */ 4904 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4905 4906 return pool; 4907 fail: 4908 if (pool) 4909 put_unbound_pool(pool); 4910 return NULL; 4911 } 4912 4913 static void rcu_free_pwq(struct rcu_head *rcu) 4914 { 4915 kmem_cache_free(pwq_cache, 4916 container_of(rcu, struct pool_workqueue, rcu)); 4917 } 4918 4919 /* 4920 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4921 * refcnt and needs to be destroyed. 4922 */ 4923 static void pwq_release_workfn(struct kthread_work *work) 4924 { 4925 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4926 release_work); 4927 struct workqueue_struct *wq = pwq->wq; 4928 struct worker_pool *pool = pwq->pool; 4929 bool is_last = false; 4930 4931 /* 4932 * When @pwq is not linked, it doesn't hold any reference to the 4933 * @wq, and @wq is invalid to access. 4934 */ 4935 if (!list_empty(&pwq->pwqs_node)) { 4936 mutex_lock(&wq->mutex); 4937 list_del_rcu(&pwq->pwqs_node); 4938 is_last = list_empty(&wq->pwqs); 4939 4940 /* 4941 * For ordered workqueue with a plugged dfl_pwq, restart it now. 4942 */ 4943 if (!is_last && (wq->flags & __WQ_ORDERED)) 4944 unplug_oldest_pwq(wq); 4945 4946 mutex_unlock(&wq->mutex); 4947 } 4948 4949 if (wq->flags & WQ_UNBOUND) { 4950 mutex_lock(&wq_pool_mutex); 4951 put_unbound_pool(pool); 4952 mutex_unlock(&wq_pool_mutex); 4953 } 4954 4955 if (!list_empty(&pwq->pending_node)) { 4956 struct wq_node_nr_active *nna = 4957 wq_node_nr_active(pwq->wq, pwq->pool->node); 4958 4959 raw_spin_lock_irq(&nna->lock); 4960 list_del_init(&pwq->pending_node); 4961 raw_spin_unlock_irq(&nna->lock); 4962 } 4963 4964 call_rcu(&pwq->rcu, rcu_free_pwq); 4965 4966 /* 4967 * If we're the last pwq going away, @wq is already dead and no one 4968 * is gonna access it anymore. Schedule RCU free. 
4969 */ 4970 if (is_last) { 4971 wq_unregister_lockdep(wq); 4972 call_rcu(&wq->rcu, rcu_free_wq); 4973 } 4974 } 4975 4976 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4977 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4978 struct worker_pool *pool) 4979 { 4980 BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK); 4981 4982 memset(pwq, 0, sizeof(*pwq)); 4983 4984 pwq->pool = pool; 4985 pwq->wq = wq; 4986 pwq->flush_color = -1; 4987 pwq->refcnt = 1; 4988 INIT_LIST_HEAD(&pwq->inactive_works); 4989 INIT_LIST_HEAD(&pwq->pending_node); 4990 INIT_LIST_HEAD(&pwq->pwqs_node); 4991 INIT_LIST_HEAD(&pwq->mayday_node); 4992 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4993 } 4994 4995 /* sync @pwq with the current state of its associated wq and link it */ 4996 static void link_pwq(struct pool_workqueue *pwq) 4997 { 4998 struct workqueue_struct *wq = pwq->wq; 4999 5000 lockdep_assert_held(&wq->mutex); 5001 5002 /* may be called multiple times, ignore if already linked */ 5003 if (!list_empty(&pwq->pwqs_node)) 5004 return; 5005 5006 /* set the matching work_color */ 5007 pwq->work_color = wq->work_color; 5008 5009 /* link in @pwq */ 5010 list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); 5011 } 5012 5013 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 5014 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 5015 const struct workqueue_attrs *attrs) 5016 { 5017 struct worker_pool *pool; 5018 struct pool_workqueue *pwq; 5019 5020 lockdep_assert_held(&wq_pool_mutex); 5021 5022 pool = get_unbound_pool(attrs); 5023 if (!pool) 5024 return NULL; 5025 5026 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 5027 if (!pwq) { 5028 put_unbound_pool(pool); 5029 return NULL; 5030 } 5031 5032 init_pwq(pwq, wq, pool); 5033 return pwq; 5034 } 5035 5036 /** 5037 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 5038 * @attrs: the wq_attrs of the default pwq of the target workqueue 5039 * @cpu: the target CPU 5040 * @cpu_going_down: if >= 0, the CPU to consider as offline 5041 * 5042 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 5043 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 5044 * The result is stored in @attrs->__pod_cpumask. 5045 * 5046 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 5047 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 5048 * intersection of the possible CPUs of @pod and @attrs->cpumask. 5049 * 5050 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 5051 */ 5052 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 5053 int cpu_going_down) 5054 { 5055 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5056 int pod = pt->cpu_pod[cpu]; 5057 5058 /* does @pod have any online CPUs @attrs wants? 
*/ 5059 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 5060 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 5061 if (cpu_going_down >= 0) 5062 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 5063 5064 if (cpumask_empty(attrs->__pod_cpumask)) { 5065 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 5066 return; 5067 } 5068 5069 /* yeap, return possible CPUs in @pod that @attrs wants */ 5070 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 5071 5072 if (cpumask_empty(attrs->__pod_cpumask)) 5073 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 5074 "possible intersect\n"); 5075 } 5076 5077 /* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */ 5078 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 5079 int cpu, struct pool_workqueue *pwq) 5080 { 5081 struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); 5082 struct pool_workqueue *old_pwq; 5083 5084 lockdep_assert_held(&wq_pool_mutex); 5085 lockdep_assert_held(&wq->mutex); 5086 5087 /* link_pwq() can handle duplicate calls */ 5088 link_pwq(pwq); 5089 5090 old_pwq = rcu_access_pointer(*slot); 5091 rcu_assign_pointer(*slot, pwq); 5092 return old_pwq; 5093 } 5094 5095 /* context to store the prepared attrs & pwqs before applying */ 5096 struct apply_wqattrs_ctx { 5097 struct workqueue_struct *wq; /* target workqueue */ 5098 struct workqueue_attrs *attrs; /* attrs to apply */ 5099 struct list_head list; /* queued for batching commit */ 5100 struct pool_workqueue *dfl_pwq; 5101 struct pool_workqueue *pwq_tbl[]; 5102 }; 5103 5104 /* free the resources after success or abort */ 5105 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 5106 { 5107 if (ctx) { 5108 int cpu; 5109 5110 for_each_possible_cpu(cpu) 5111 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 5112 put_pwq_unlocked(ctx->dfl_pwq); 5113 5114 free_workqueue_attrs(ctx->attrs); 5115 5116 kfree(ctx); 5117 } 5118 } 5119 5120 /* allocate the attrs and pwqs for later installation */ 5121 static struct apply_wqattrs_ctx * 5122 apply_wqattrs_prepare(struct workqueue_struct *wq, 5123 const struct workqueue_attrs *attrs, 5124 const cpumask_var_t unbound_cpumask) 5125 { 5126 struct apply_wqattrs_ctx *ctx; 5127 struct workqueue_attrs *new_attrs; 5128 int cpu; 5129 5130 lockdep_assert_held(&wq_pool_mutex); 5131 5132 if (WARN_ON(attrs->affn_scope < 0 || 5133 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 5134 return ERR_PTR(-EINVAL); 5135 5136 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 5137 5138 new_attrs = alloc_workqueue_attrs(); 5139 if (!ctx || !new_attrs) 5140 goto out_free; 5141 5142 /* 5143 * If something goes wrong during CPU up/down, we'll fall back to 5144 * the default pwq covering whole @attrs->cpumask. Always create 5145 * it even if we don't use it immediately. 5146 */ 5147 copy_workqueue_attrs(new_attrs, attrs); 5148 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 5149 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 5150 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 5151 if (!ctx->dfl_pwq) 5152 goto out_free; 5153 5154 for_each_possible_cpu(cpu) { 5155 if (new_attrs->ordered) { 5156 ctx->dfl_pwq->refcnt++; 5157 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 5158 } else { 5159 wq_calc_pod_cpumask(new_attrs, cpu, -1); 5160 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 5161 if (!ctx->pwq_tbl[cpu]) 5162 goto out_free; 5163 } 5164 } 5165 5166 /* save the user configured attrs and sanitize it. 
*/ 5167 copy_workqueue_attrs(new_attrs, attrs); 5168 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 5169 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 5170 ctx->attrs = new_attrs; 5171 5172 /* 5173 * For initialized ordered workqueues, there should only be one pwq 5174 * (dfl_pwq). Set the plugged flag of ctx->dfl_pwq to suspend execution 5175 * of newly queued work items until execution of older work items in 5176 * the old pwq's have completed. 5177 */ 5178 if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)) 5179 ctx->dfl_pwq->plugged = true; 5180 5181 ctx->wq = wq; 5182 return ctx; 5183 5184 out_free: 5185 free_workqueue_attrs(new_attrs); 5186 apply_wqattrs_cleanup(ctx); 5187 return ERR_PTR(-ENOMEM); 5188 } 5189 5190 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 5191 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 5192 { 5193 int cpu; 5194 5195 /* all pwqs have been created successfully, let's install'em */ 5196 mutex_lock(&ctx->wq->mutex); 5197 5198 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 5199 5200 /* save the previous pwqs and install the new ones */ 5201 for_each_possible_cpu(cpu) 5202 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 5203 ctx->pwq_tbl[cpu]); 5204 ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); 5205 5206 /* update node_nr_active->max */ 5207 wq_update_node_max_active(ctx->wq, -1); 5208 5209 /* rescuer needs to respect wq cpumask changes */ 5210 if (ctx->wq->rescuer) 5211 set_cpus_allowed_ptr(ctx->wq->rescuer->task, 5212 unbound_effective_cpumask(ctx->wq)); 5213 5214 mutex_unlock(&ctx->wq->mutex); 5215 } 5216 5217 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 5218 const struct workqueue_attrs *attrs) 5219 { 5220 struct apply_wqattrs_ctx *ctx; 5221 5222 /* only unbound workqueues can change attributes */ 5223 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 5224 return -EINVAL; 5225 5226 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 5227 if (IS_ERR(ctx)) 5228 return PTR_ERR(ctx); 5229 5230 /* the ctx has been prepared successfully, let's commit it */ 5231 apply_wqattrs_commit(ctx); 5232 apply_wqattrs_cleanup(ctx); 5233 5234 return 0; 5235 } 5236 5237 /** 5238 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 5239 * @wq: the target workqueue 5240 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 5241 * 5242 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 5243 * a separate pwq to each CPU pod with possibles CPUs in @attrs->cpumask so that 5244 * work items are affine to the pod it was issued on. Older pwqs are released as 5245 * in-flight work items finish. Note that a work item which repeatedly requeues 5246 * itself back-to-back will stay on its current pwq. 5247 * 5248 * Performs GFP_KERNEL allocations. 5249 * 5250 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 5251 * 5252 * Return: 0 on success and -errno on failure. 
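 *
 * A rough usage sketch (foo_wq and foo_mask are hypothetical, error handling
 * elided); as noted above, the caller provides CPU hotplug read exclusion:
 *
 *	attrs = alloc_workqueue_attrs();
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, foo_mask);
 *	cpus_read_lock();
 *	ret = apply_workqueue_attrs(foo_wq, attrs);
 *	cpus_read_unlock();
 *	free_workqueue_attrs(attrs);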
5253 */ 5254 int apply_workqueue_attrs(struct workqueue_struct *wq, 5255 const struct workqueue_attrs *attrs) 5256 { 5257 int ret; 5258 5259 lockdep_assert_cpus_held(); 5260 5261 mutex_lock(&wq_pool_mutex); 5262 ret = apply_workqueue_attrs_locked(wq, attrs); 5263 mutex_unlock(&wq_pool_mutex); 5264 5265 return ret; 5266 } 5267 5268 /** 5269 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 5270 * @wq: the target workqueue 5271 * @cpu: the CPU to update pool association for 5272 * @hotplug_cpu: the CPU coming up or going down 5273 * @online: whether @cpu is coming up or going down 5274 * 5275 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 5276 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 5277 * @wq accordingly. 5278 * 5279 * 5280 * If pod affinity can't be adjusted due to memory allocation failure, it falls 5281 * back to @wq->dfl_pwq which may not be optimal but is always correct. 5282 * 5283 * Note that when the last allowed CPU of a pod goes offline for a workqueue 5284 * with a cpumask spanning multiple pods, the workers which were already 5285 * executing the work items for the workqueue will lose their CPU affinity and 5286 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 5287 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 5288 * responsibility to flush the work item from CPU_DOWN_PREPARE. 5289 */ 5290 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 5291 int hotplug_cpu, bool online) 5292 { 5293 int off_cpu = online ? -1 : hotplug_cpu; 5294 struct pool_workqueue *old_pwq = NULL, *pwq; 5295 struct workqueue_attrs *target_attrs; 5296 5297 lockdep_assert_held(&wq_pool_mutex); 5298 5299 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 5300 return; 5301 5302 /* 5303 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 5304 * Let's use a preallocated one. The following buf is protected by 5305 * CPU hotplug exclusion. 5306 */ 5307 target_attrs = wq_update_pod_attrs_buf; 5308 5309 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 5310 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 5311 5312 /* nothing to do if the target cpumask matches the current pwq */ 5313 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 5314 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) 5315 return; 5316 5317 /* create a new pwq */ 5318 pwq = alloc_unbound_pwq(wq, target_attrs); 5319 if (!pwq) { 5320 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 5321 wq->name); 5322 goto use_dfl_pwq; 5323 } 5324 5325 /* Install the new pwq. 
*/ 5326 mutex_lock(&wq->mutex); 5327 old_pwq = install_unbound_pwq(wq, cpu, pwq); 5328 goto out_unlock; 5329 5330 use_dfl_pwq: 5331 mutex_lock(&wq->mutex); 5332 pwq = unbound_pwq(wq, -1); 5333 raw_spin_lock_irq(&pwq->pool->lock); 5334 get_pwq(pwq); 5335 raw_spin_unlock_irq(&pwq->pool->lock); 5336 old_pwq = install_unbound_pwq(wq, cpu, pwq); 5337 out_unlock: 5338 mutex_unlock(&wq->mutex); 5339 put_pwq_unlocked(old_pwq); 5340 } 5341 5342 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 5343 { 5344 bool highpri = wq->flags & WQ_HIGHPRI; 5345 int cpu, ret; 5346 5347 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 5348 if (!wq->cpu_pwq) 5349 goto enomem; 5350 5351 if (!(wq->flags & WQ_UNBOUND)) { 5352 for_each_possible_cpu(cpu) { 5353 struct pool_workqueue **pwq_p; 5354 struct worker_pool __percpu *pools; 5355 struct worker_pool *pool; 5356 5357 if (wq->flags & WQ_BH) 5358 pools = bh_worker_pools; 5359 else 5360 pools = cpu_worker_pools; 5361 5362 pool = &(per_cpu_ptr(pools, cpu)[highpri]); 5363 pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu); 5364 5365 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 5366 pool->node); 5367 if (!*pwq_p) 5368 goto enomem; 5369 5370 init_pwq(*pwq_p, wq, pool); 5371 5372 mutex_lock(&wq->mutex); 5373 link_pwq(*pwq_p); 5374 mutex_unlock(&wq->mutex); 5375 } 5376 return 0; 5377 } 5378 5379 cpus_read_lock(); 5380 if (wq->flags & __WQ_ORDERED) { 5381 struct pool_workqueue *dfl_pwq; 5382 5383 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 5384 /* there should only be single pwq for ordering guarantee */ 5385 dfl_pwq = rcu_access_pointer(wq->dfl_pwq); 5386 WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || 5387 wq->pwqs.prev != &dfl_pwq->pwqs_node), 5388 "ordering guarantee broken for workqueue %s\n", wq->name); 5389 } else { 5390 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 5391 } 5392 cpus_read_unlock(); 5393 5394 /* for unbound pwq, flush the pwq_release_worker ensures that the 5395 * pwq_release_workfn() completes before calling kfree(wq). 5396 */ 5397 if (ret) 5398 kthread_flush_worker(pwq_release_worker); 5399 5400 return ret; 5401 5402 enomem: 5403 if (wq->cpu_pwq) { 5404 for_each_possible_cpu(cpu) { 5405 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 5406 5407 if (pwq) 5408 kmem_cache_free(pwq_cache, pwq); 5409 } 5410 free_percpu(wq->cpu_pwq); 5411 wq->cpu_pwq = NULL; 5412 } 5413 return -ENOMEM; 5414 } 5415 5416 static int wq_clamp_max_active(int max_active, unsigned int flags, 5417 const char *name) 5418 { 5419 if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 5420 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 5421 max_active, name, 1, WQ_MAX_ACTIVE); 5422 5423 return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 5424 } 5425 5426 /* 5427 * Workqueues which may be used during memory reclaim should have a rescuer 5428 * to guarantee forward progress. 
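 *
 * As an illustrative example (the name "foo_io_wq" is invented), a driver
 * whose work items must keep making progress while the system is reclaiming
 * memory would typically allocate its workqueue as
 *
 *	wq = alloc_workqueue("foo_io_wq", WQ_MEM_RECLAIM, 0);
 *
 * which makes init_rescuer() below attach a dedicated rescuer thread to it.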
5429 */ 5430 static int init_rescuer(struct workqueue_struct *wq) 5431 { 5432 struct worker *rescuer; 5433 int ret; 5434 5435 if (!(wq->flags & WQ_MEM_RECLAIM)) 5436 return 0; 5437 5438 rescuer = alloc_worker(NUMA_NO_NODE); 5439 if (!rescuer) { 5440 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 5441 wq->name); 5442 return -ENOMEM; 5443 } 5444 5445 rescuer->rescue_wq = wq; 5446 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 5447 if (IS_ERR(rescuer->task)) { 5448 ret = PTR_ERR(rescuer->task); 5449 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 5450 wq->name, ERR_PTR(ret)); 5451 kfree(rescuer); 5452 return ret; 5453 } 5454 5455 wq->rescuer = rescuer; 5456 if (wq->flags & WQ_UNBOUND) 5457 kthread_bind_mask(rescuer->task, wq_unbound_cpumask); 5458 else 5459 kthread_bind_mask(rescuer->task, cpu_possible_mask); 5460 wake_up_process(rescuer->task); 5461 5462 return 0; 5463 } 5464 5465 /** 5466 * wq_adjust_max_active - update a wq's max_active to the current setting 5467 * @wq: target workqueue 5468 * 5469 * If @wq isn't freezing, set @wq->max_active to the saved_max_active and 5470 * activate inactive work items accordingly. If @wq is freezing, clear 5471 * @wq->max_active to zero. 5472 */ 5473 static void wq_adjust_max_active(struct workqueue_struct *wq) 5474 { 5475 bool activated; 5476 int new_max, new_min; 5477 5478 lockdep_assert_held(&wq->mutex); 5479 5480 if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { 5481 new_max = 0; 5482 new_min = 0; 5483 } else { 5484 new_max = wq->saved_max_active; 5485 new_min = wq->saved_min_active; 5486 } 5487 5488 if (wq->max_active == new_max && wq->min_active == new_min) 5489 return; 5490 5491 /* 5492 * Update @wq->max/min_active and then kick inactive work items if more 5493 * active work items are allowed. This doesn't break work item ordering 5494 * because new work items are always queued behind existing inactive 5495 * work items if there are any. 5496 */ 5497 WRITE_ONCE(wq->max_active, new_max); 5498 WRITE_ONCE(wq->min_active, new_min); 5499 5500 if (wq->flags & WQ_UNBOUND) 5501 wq_update_node_max_active(wq, -1); 5502 5503 if (new_max == 0) 5504 return; 5505 5506 /* 5507 * Round-robin through pwq's activating the first inactive work item 5508 * until max_active is filled. 5509 */ 5510 do { 5511 struct pool_workqueue *pwq; 5512 5513 activated = false; 5514 for_each_pwq(pwq, wq) { 5515 unsigned long irq_flags; 5516 5517 /* can be called during early boot w/ irq disabled */ 5518 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); 5519 if (pwq_activate_first_inactive(pwq, true)) { 5520 activated = true; 5521 kick_pool(pwq->pool); 5522 } 5523 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); 5524 } 5525 } while (activated); 5526 } 5527 5528 __printf(1, 4) 5529 struct workqueue_struct *alloc_workqueue(const char *fmt, 5530 unsigned int flags, 5531 int max_active, ...) 
5532 { 5533 va_list args; 5534 struct workqueue_struct *wq; 5535 size_t wq_size; 5536 int name_len; 5537 5538 if (flags & WQ_BH) { 5539 if (WARN_ON_ONCE(flags & ~__WQ_BH_ALLOWS)) 5540 return NULL; 5541 if (WARN_ON_ONCE(max_active)) 5542 return NULL; 5543 } 5544 5545 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 5546 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 5547 flags |= WQ_UNBOUND; 5548 5549 /* allocate wq and format name */ 5550 if (flags & WQ_UNBOUND) 5551 wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1); 5552 else 5553 wq_size = sizeof(*wq); 5554 5555 wq = kzalloc(wq_size, GFP_KERNEL); 5556 if (!wq) 5557 return NULL; 5558 5559 if (flags & WQ_UNBOUND) { 5560 wq->unbound_attrs = alloc_workqueue_attrs(); 5561 if (!wq->unbound_attrs) 5562 goto err_free_wq; 5563 } 5564 5565 va_start(args, max_active); 5566 name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); 5567 va_end(args); 5568 5569 if (name_len >= WQ_NAME_LEN) 5570 pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", 5571 wq->name); 5572 5573 if (flags & WQ_BH) { 5574 /* 5575 * BH workqueues always share a single execution context per CPU 5576 * and don't impose any max_active limit. 5577 */ 5578 max_active = INT_MAX; 5579 } else { 5580 max_active = max_active ?: WQ_DFL_ACTIVE; 5581 max_active = wq_clamp_max_active(max_active, flags, wq->name); 5582 } 5583 5584 /* init wq */ 5585 wq->flags = flags; 5586 wq->max_active = max_active; 5587 wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE); 5588 wq->saved_max_active = wq->max_active; 5589 wq->saved_min_active = wq->min_active; 5590 mutex_init(&wq->mutex); 5591 atomic_set(&wq->nr_pwqs_to_flush, 0); 5592 INIT_LIST_HEAD(&wq->pwqs); 5593 INIT_LIST_HEAD(&wq->flusher_queue); 5594 INIT_LIST_HEAD(&wq->flusher_overflow); 5595 INIT_LIST_HEAD(&wq->maydays); 5596 5597 wq_init_lockdep(wq); 5598 INIT_LIST_HEAD(&wq->list); 5599 5600 if (flags & WQ_UNBOUND) { 5601 if (alloc_node_nr_active(wq->node_nr_active) < 0) 5602 goto err_unreg_lockdep; 5603 } 5604 5605 if (alloc_and_link_pwqs(wq) < 0) 5606 goto err_free_node_nr_active; 5607 5608 if (wq_online && init_rescuer(wq) < 0) 5609 goto err_destroy; 5610 5611 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 5612 goto err_destroy; 5613 5614 /* 5615 * wq_pool_mutex protects global freeze state and workqueues list. 5616 * Grab it, adjust max_active and add the new @wq to workqueues 5617 * list. 
5618 */ 5619 mutex_lock(&wq_pool_mutex); 5620 5621 mutex_lock(&wq->mutex); 5622 wq_adjust_max_active(wq); 5623 mutex_unlock(&wq->mutex); 5624 5625 list_add_tail_rcu(&wq->list, &workqueues); 5626 5627 mutex_unlock(&wq_pool_mutex); 5628 5629 return wq; 5630 5631 err_free_node_nr_active: 5632 if (wq->flags & WQ_UNBOUND) 5633 free_node_nr_active(wq->node_nr_active); 5634 err_unreg_lockdep: 5635 wq_unregister_lockdep(wq); 5636 wq_free_lockdep(wq); 5637 err_free_wq: 5638 free_workqueue_attrs(wq->unbound_attrs); 5639 kfree(wq); 5640 return NULL; 5641 err_destroy: 5642 destroy_workqueue(wq); 5643 return NULL; 5644 } 5645 EXPORT_SYMBOL_GPL(alloc_workqueue); 5646 5647 static bool pwq_busy(struct pool_workqueue *pwq) 5648 { 5649 int i; 5650 5651 for (i = 0; i < WORK_NR_COLORS; i++) 5652 if (pwq->nr_in_flight[i]) 5653 return true; 5654 5655 if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) 5656 return true; 5657 if (!pwq_is_empty(pwq)) 5658 return true; 5659 5660 return false; 5661 } 5662 5663 /** 5664 * destroy_workqueue - safely terminate a workqueue 5665 * @wq: target workqueue 5666 * 5667 * Safely destroy a workqueue. All work currently pending will be done first. 5668 */ 5669 void destroy_workqueue(struct workqueue_struct *wq) 5670 { 5671 struct pool_workqueue *pwq; 5672 int cpu; 5673 5674 /* 5675 * Remove it from sysfs first so that sanity check failure doesn't 5676 * lead to sysfs name conflicts. 5677 */ 5678 workqueue_sysfs_unregister(wq); 5679 5680 /* mark the workqueue destruction is in progress */ 5681 mutex_lock(&wq->mutex); 5682 wq->flags |= __WQ_DESTROYING; 5683 mutex_unlock(&wq->mutex); 5684 5685 /* drain it before proceeding with destruction */ 5686 drain_workqueue(wq); 5687 5688 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 5689 if (wq->rescuer) { 5690 struct worker *rescuer = wq->rescuer; 5691 5692 /* this prevents new queueing */ 5693 raw_spin_lock_irq(&wq_mayday_lock); 5694 wq->rescuer = NULL; 5695 raw_spin_unlock_irq(&wq_mayday_lock); 5696 5697 /* rescuer will empty maydays list before exiting */ 5698 kthread_stop(rescuer->task); 5699 kfree(rescuer); 5700 } 5701 5702 /* 5703 * Sanity checks - grab all the locks so that we wait for all 5704 * in-flight operations which may do put_pwq(). 5705 */ 5706 mutex_lock(&wq_pool_mutex); 5707 mutex_lock(&wq->mutex); 5708 for_each_pwq(pwq, wq) { 5709 raw_spin_lock_irq(&pwq->pool->lock); 5710 if (WARN_ON(pwq_busy(pwq))) { 5711 pr_warn("%s: %s has the following busy pwq\n", 5712 __func__, wq->name); 5713 show_pwq(pwq); 5714 raw_spin_unlock_irq(&pwq->pool->lock); 5715 mutex_unlock(&wq->mutex); 5716 mutex_unlock(&wq_pool_mutex); 5717 show_one_workqueue(wq); 5718 return; 5719 } 5720 raw_spin_unlock_irq(&pwq->pool->lock); 5721 } 5722 mutex_unlock(&wq->mutex); 5723 5724 /* 5725 * wq list is used to freeze wq, remove from list after 5726 * flushing is complete in case freeze races us. 5727 */ 5728 list_del_rcu(&wq->list); 5729 mutex_unlock(&wq_pool_mutex); 5730 5731 /* 5732 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 5733 * to put the base refs. @wq will be auto-destroyed from the last 5734 * pwq_put. RCU read lock prevents @wq from going away from under us. 
5735 */ 5736 rcu_read_lock(); 5737 5738 for_each_possible_cpu(cpu) { 5739 put_pwq_unlocked(unbound_pwq(wq, cpu)); 5740 RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); 5741 } 5742 5743 put_pwq_unlocked(unbound_pwq(wq, -1)); 5744 RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); 5745 5746 rcu_read_unlock(); 5747 } 5748 EXPORT_SYMBOL_GPL(destroy_workqueue); 5749 5750 /** 5751 * workqueue_set_max_active - adjust max_active of a workqueue 5752 * @wq: target workqueue 5753 * @max_active: new max_active value. 5754 * 5755 * Set max_active of @wq to @max_active. See the alloc_workqueue() function 5756 * comment. 5757 * 5758 * CONTEXT: 5759 * Don't call from IRQ context. 5760 */ 5761 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 5762 { 5763 /* max_active doesn't mean anything for BH workqueues */ 5764 if (WARN_ON(wq->flags & WQ_BH)) 5765 return; 5766 /* disallow meddling with max_active for ordered workqueues */ 5767 if (WARN_ON(wq->flags & __WQ_ORDERED)) 5768 return; 5769 5770 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 5771 5772 mutex_lock(&wq->mutex); 5773 5774 wq->saved_max_active = max_active; 5775 if (wq->flags & WQ_UNBOUND) 5776 wq->saved_min_active = min(wq->saved_min_active, max_active); 5777 5778 wq_adjust_max_active(wq); 5779 5780 mutex_unlock(&wq->mutex); 5781 } 5782 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 5783 5784 /** 5785 * workqueue_set_min_active - adjust min_active of an unbound workqueue 5786 * @wq: target unbound workqueue 5787 * @min_active: new min_active value 5788 * 5789 * Set min_active of an unbound workqueue. Unlike other types of workqueues, an 5790 * unbound workqueue is not guaranteed to be able to process max_active 5791 * interdependent work items. Instead, an unbound workqueue is guaranteed to be 5792 * able to process min_active number of interdependent work items which is 5793 * %WQ_DFL_MIN_ACTIVE by default. 5794 * 5795 * Use this function to adjust the min_active value between 0 and the current 5796 * max_active. 5797 */ 5798 void workqueue_set_min_active(struct workqueue_struct *wq, int min_active) 5799 { 5800 /* min_active is only meaningful for non-ordered unbound workqueues */ 5801 if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) != 5802 WQ_UNBOUND)) 5803 return; 5804 5805 mutex_lock(&wq->mutex); 5806 wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active); 5807 wq_adjust_max_active(wq); 5808 mutex_unlock(&wq->mutex); 5809 } 5810 5811 /** 5812 * current_work - retrieve %current task's work struct 5813 * 5814 * Determine if %current task is a workqueue worker and what it's working on. 5815 * Useful to find out the context that the %current task is running in. 5816 * 5817 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 5818 */ 5819 struct work_struct *current_work(void) 5820 { 5821 struct worker *worker = current_wq_worker(); 5822 5823 return worker ? worker->current_work : NULL; 5824 } 5825 EXPORT_SYMBOL(current_work); 5826 5827 /** 5828 * current_is_workqueue_rescuer - is %current workqueue rescuer? 5829 * 5830 * Determine whether %current is a workqueue rescuer. Can be used from 5831 * work functions to determine whether it's being run off the rescuer task. 5832 * 5833 * Return: %true if %current is a workqueue rescuer. %false otherwise. 
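 *
 * Purely illustrative sketch (foo_do_minimal() and foo_do_full() are
 * hypothetical helpers): a work function could skip optional,
 * allocation-heavy processing when it is being run by the rescuer under
 * memory pressure:
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		if (current_is_workqueue_rescuer())
 *			foo_do_minimal(work);
 *		else
 *			foo_do_full(work);
 *	}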
5834 */ 5835 bool current_is_workqueue_rescuer(void) 5836 { 5837 struct worker *worker = current_wq_worker(); 5838 5839 return worker && worker->rescue_wq; 5840 } 5841 5842 /** 5843 * workqueue_congested - test whether a workqueue is congested 5844 * @cpu: CPU in question 5845 * @wq: target workqueue 5846 * 5847 * Test whether @wq's cpu workqueue for @cpu is congested. There is 5848 * no synchronization around this function and the test result is 5849 * unreliable and only useful as advisory hints or for debugging. 5850 * 5851 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 5852 * 5853 * With the exception of ordered workqueues, all workqueues have per-cpu 5854 * pool_workqueues, each with its own congested state. A workqueue being 5855 * congested on one CPU doesn't mean that the workqueue is contested on any 5856 * other CPUs. 5857 * 5858 * Return: 5859 * %true if congested, %false otherwise. 5860 */ 5861 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 5862 { 5863 struct pool_workqueue *pwq; 5864 bool ret; 5865 5866 rcu_read_lock(); 5867 preempt_disable(); 5868 5869 if (cpu == WORK_CPU_UNBOUND) 5870 cpu = smp_processor_id(); 5871 5872 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 5873 ret = !list_empty(&pwq->inactive_works); 5874 5875 preempt_enable(); 5876 rcu_read_unlock(); 5877 5878 return ret; 5879 } 5880 EXPORT_SYMBOL_GPL(workqueue_congested); 5881 5882 /** 5883 * work_busy - test whether a work is currently pending or running 5884 * @work: the work to be tested 5885 * 5886 * Test whether @work is currently pending or running. There is no 5887 * synchronization around this function and the test result is 5888 * unreliable and only useful as advisory hints or for debugging. 5889 * 5890 * Return: 5891 * OR'd bitmask of WORK_BUSY_* bits. 5892 */ 5893 unsigned int work_busy(struct work_struct *work) 5894 { 5895 struct worker_pool *pool; 5896 unsigned long irq_flags; 5897 unsigned int ret = 0; 5898 5899 if (work_pending(work)) 5900 ret |= WORK_BUSY_PENDING; 5901 5902 rcu_read_lock(); 5903 pool = get_work_pool(work); 5904 if (pool) { 5905 raw_spin_lock_irqsave(&pool->lock, irq_flags); 5906 if (find_worker_executing_work(pool, work)) 5907 ret |= WORK_BUSY_RUNNING; 5908 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); 5909 } 5910 rcu_read_unlock(); 5911 5912 return ret; 5913 } 5914 EXPORT_SYMBOL_GPL(work_busy); 5915 5916 /** 5917 * set_worker_desc - set description for the current work item 5918 * @fmt: printf-style format string 5919 * @...: arguments for the format string 5920 * 5921 * This function can be called by a running work function to describe what 5922 * the work item is about. If the worker task gets dumped, this 5923 * information will be printed out together to help debugging. The 5924 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 5925 */ 5926 void set_worker_desc(const char *fmt, ...) 
5927 { 5928 struct worker *worker = current_wq_worker(); 5929 va_list args; 5930 5931 if (worker) { 5932 va_start(args, fmt); 5933 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5934 va_end(args); 5935 } 5936 } 5937 EXPORT_SYMBOL_GPL(set_worker_desc); 5938 5939 /** 5940 * print_worker_info - print out worker information and description 5941 * @log_lvl: the log level to use when printing 5942 * @task: target task 5943 * 5944 * If @task is a worker and currently executing a work item, print out the 5945 * name of the workqueue being serviced and worker description set with 5946 * set_worker_desc() by the currently executing work item. 5947 * 5948 * This function can be safely called on any task as long as the 5949 * task_struct itself is accessible. While safe, this function isn't 5950 * synchronized and may print out mixups or garbages of limited length. 5951 */ 5952 void print_worker_info(const char *log_lvl, struct task_struct *task) 5953 { 5954 work_func_t *fn = NULL; 5955 char name[WQ_NAME_LEN] = { }; 5956 char desc[WORKER_DESC_LEN] = { }; 5957 struct pool_workqueue *pwq = NULL; 5958 struct workqueue_struct *wq = NULL; 5959 struct worker *worker; 5960 5961 if (!(task->flags & PF_WQ_WORKER)) 5962 return; 5963 5964 /* 5965 * This function is called without any synchronization and @task 5966 * could be in any state. Be careful with dereferences. 5967 */ 5968 worker = kthread_probe_data(task); 5969 5970 /* 5971 * Carefully copy the associated workqueue's workfn, name and desc. 5972 * Keep the original last '\0' in case the original is garbage. 5973 */ 5974 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5975 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5976 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5977 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5978 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5979 5980 if (fn || name[0] || desc[0]) { 5981 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5982 if (strcmp(name, desc)) 5983 pr_cont(" (%s)", desc); 5984 pr_cont("\n"); 5985 } 5986 } 5987 5988 static void pr_cont_pool_info(struct worker_pool *pool) 5989 { 5990 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5991 if (pool->node != NUMA_NO_NODE) 5992 pr_cont(" node=%d", pool->node); 5993 pr_cont(" flags=0x%x", pool->flags); 5994 if (pool->flags & POOL_BH) 5995 pr_cont(" bh%s", 5996 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); 5997 else 5998 pr_cont(" nice=%d", pool->attrs->nice); 5999 } 6000 6001 static void pr_cont_worker_id(struct worker *worker) 6002 { 6003 struct worker_pool *pool = worker->pool; 6004 6005 if (pool->flags & WQ_BH) 6006 pr_cont("bh%s", 6007 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); 6008 else 6009 pr_cont("%d%s", task_pid_nr(worker->task), 6010 worker->rescue_wq ? "(RESCUER)" : ""); 6011 } 6012 6013 struct pr_cont_work_struct { 6014 bool comma; 6015 work_func_t func; 6016 long ctr; 6017 }; 6018 6019 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 6020 { 6021 if (!pcwsp->ctr) 6022 goto out_record; 6023 if (func == pcwsp->func) { 6024 pcwsp->ctr++; 6025 return; 6026 } 6027 if (pcwsp->ctr == 1) 6028 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 6029 else 6030 pr_cont("%s %ld*%ps", pcwsp->comma ? 
"," : "", pcwsp->ctr, pcwsp->func); 6031 pcwsp->ctr = 0; 6032 out_record: 6033 if ((long)func == -1L) 6034 return; 6035 pcwsp->comma = comma; 6036 pcwsp->func = func; 6037 pcwsp->ctr = 1; 6038 } 6039 6040 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 6041 { 6042 if (work->func == wq_barrier_func) { 6043 struct wq_barrier *barr; 6044 6045 barr = container_of(work, struct wq_barrier, work); 6046 6047 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 6048 pr_cont("%s BAR(%d)", comma ? "," : "", 6049 task_pid_nr(barr->task)); 6050 } else { 6051 if (!comma) 6052 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 6053 pr_cont_work_flush(comma, work->func, pcwsp); 6054 } 6055 } 6056 6057 static void show_pwq(struct pool_workqueue *pwq) 6058 { 6059 struct pr_cont_work_struct pcws = { .ctr = 0, }; 6060 struct worker_pool *pool = pwq->pool; 6061 struct work_struct *work; 6062 struct worker *worker; 6063 bool has_in_flight = false, has_pending = false; 6064 int bkt; 6065 6066 pr_info(" pwq %d:", pool->id); 6067 pr_cont_pool_info(pool); 6068 6069 pr_cont(" active=%d refcnt=%d%s\n", 6070 pwq->nr_active, pwq->refcnt, 6071 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 6072 6073 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6074 if (worker->current_pwq == pwq) { 6075 has_in_flight = true; 6076 break; 6077 } 6078 } 6079 if (has_in_flight) { 6080 bool comma = false; 6081 6082 pr_info(" in-flight:"); 6083 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6084 if (worker->current_pwq != pwq) 6085 continue; 6086 6087 pr_cont(" %s", comma ? "," : ""); 6088 pr_cont_worker_id(worker); 6089 pr_cont(":%ps", worker->current_func); 6090 list_for_each_entry(work, &worker->scheduled, entry) 6091 pr_cont_work(false, work, &pcws); 6092 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 6093 comma = true; 6094 } 6095 pr_cont("\n"); 6096 } 6097 6098 list_for_each_entry(work, &pool->worklist, entry) { 6099 if (get_work_pwq(work) == pwq) { 6100 has_pending = true; 6101 break; 6102 } 6103 } 6104 if (has_pending) { 6105 bool comma = false; 6106 6107 pr_info(" pending:"); 6108 list_for_each_entry(work, &pool->worklist, entry) { 6109 if (get_work_pwq(work) != pwq) 6110 continue; 6111 6112 pr_cont_work(comma, work, &pcws); 6113 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6114 } 6115 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 6116 pr_cont("\n"); 6117 } 6118 6119 if (!list_empty(&pwq->inactive_works)) { 6120 bool comma = false; 6121 6122 pr_info(" inactive:"); 6123 list_for_each_entry(work, &pwq->inactive_works, entry) { 6124 pr_cont_work(comma, work, &pcws); 6125 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6126 } 6127 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 6128 pr_cont("\n"); 6129 } 6130 } 6131 6132 /** 6133 * show_one_workqueue - dump state of specified workqueue 6134 * @wq: workqueue whose state will be printed 6135 */ 6136 void show_one_workqueue(struct workqueue_struct *wq) 6137 { 6138 struct pool_workqueue *pwq; 6139 bool idle = true; 6140 unsigned long irq_flags; 6141 6142 for_each_pwq(pwq, wq) { 6143 if (!pwq_is_empty(pwq)) { 6144 idle = false; 6145 break; 6146 } 6147 } 6148 if (idle) /* Nothing to print for idle workqueue */ 6149 return; 6150 6151 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 6152 6153 for_each_pwq(pwq, wq) { 6154 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); 6155 if (!pwq_is_empty(pwq)) { 6156 /* 6157 * Defer printing to avoid deadlocks in console 6158 * drivers that queue 
work while holding locks 6159 * also taken in their write paths. 6160 */ 6161 printk_deferred_enter(); 6162 show_pwq(pwq); 6163 printk_deferred_exit(); 6164 } 6165 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); 6166 /* 6167 * We could be printing a lot from atomic context, e.g. 6168 * sysrq-t -> show_all_workqueues(). Avoid triggering 6169 * hard lockup. 6170 */ 6171 touch_nmi_watchdog(); 6172 } 6173 6174 } 6175 6176 /** 6177 * show_one_worker_pool - dump state of specified worker pool 6178 * @pool: worker pool whose state will be printed 6179 */ 6180 static void show_one_worker_pool(struct worker_pool *pool) 6181 { 6182 struct worker *worker; 6183 bool first = true; 6184 unsigned long irq_flags; 6185 unsigned long hung = 0; 6186 6187 raw_spin_lock_irqsave(&pool->lock, irq_flags); 6188 if (pool->nr_workers == pool->nr_idle) 6189 goto next_pool; 6190 6191 /* How long the first pending work is waiting for a worker. */ 6192 if (!list_empty(&pool->worklist)) 6193 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 6194 6195 /* 6196 * Defer printing to avoid deadlocks in console drivers that 6197 * queue work while holding locks also taken in their write 6198 * paths. 6199 */ 6200 printk_deferred_enter(); 6201 pr_info("pool %d:", pool->id); 6202 pr_cont_pool_info(pool); 6203 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 6204 if (pool->manager) 6205 pr_cont(" manager: %d", 6206 task_pid_nr(pool->manager->task)); 6207 list_for_each_entry(worker, &pool->idle_list, entry) { 6208 pr_cont(" %s", first ? "idle: " : ""); 6209 pr_cont_worker_id(worker); 6210 first = false; 6211 } 6212 pr_cont("\n"); 6213 printk_deferred_exit(); 6214 next_pool: 6215 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); 6216 /* 6217 * We could be printing a lot from atomic context, e.g. 6218 * sysrq-t -> show_all_workqueues(). Avoid triggering 6219 * hard lockup. 6220 */ 6221 touch_nmi_watchdog(); 6222 6223 } 6224 6225 /** 6226 * show_all_workqueues - dump workqueue state 6227 * 6228 * Called from a sysrq handler and prints out all busy workqueues and pools. 6229 */ 6230 void show_all_workqueues(void) 6231 { 6232 struct workqueue_struct *wq; 6233 struct worker_pool *pool; 6234 int pi; 6235 6236 rcu_read_lock(); 6237 6238 pr_info("Showing busy workqueues and worker pools:\n"); 6239 6240 list_for_each_entry_rcu(wq, &workqueues, list) 6241 show_one_workqueue(wq); 6242 6243 for_each_pool(pool, pi) 6244 show_one_worker_pool(pool); 6245 6246 rcu_read_unlock(); 6247 } 6248 6249 /** 6250 * show_freezable_workqueues - dump freezable workqueue state 6251 * 6252 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 6253 * still busy. 
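 *
 * For example (illustrative only, "foo_fz" is an invented name), a workqueue
 * created with alloc_workqueue("foo_fz", WQ_FREEZABLE, 0) that is still busy
 * at this point is dumped here, while non-freezable workqueues are skipped
 * entirely.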
6254 */ 6255 void show_freezable_workqueues(void) 6256 { 6257 struct workqueue_struct *wq; 6258 6259 rcu_read_lock(); 6260 6261 pr_info("Showing freezable workqueues that are still busy:\n"); 6262 6263 list_for_each_entry_rcu(wq, &workqueues, list) { 6264 if (!(wq->flags & WQ_FREEZABLE)) 6265 continue; 6266 show_one_workqueue(wq); 6267 } 6268 6269 rcu_read_unlock(); 6270 } 6271 6272 /* used to show worker information through /proc/PID/{comm,stat,status} */ 6273 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 6274 { 6275 int off; 6276 6277 /* always show the actual comm */ 6278 off = strscpy(buf, task->comm, size); 6279 if (off < 0) 6280 return; 6281 6282 /* stabilize PF_WQ_WORKER and worker pool association */ 6283 mutex_lock(&wq_pool_attach_mutex); 6284 6285 if (task->flags & PF_WQ_WORKER) { 6286 struct worker *worker = kthread_data(task); 6287 struct worker_pool *pool = worker->pool; 6288 6289 if (pool) { 6290 raw_spin_lock_irq(&pool->lock); 6291 /* 6292 * ->desc tracks information (wq name or 6293 * set_worker_desc()) for the latest execution. If 6294 * current, prepend '+', otherwise '-'. 6295 */ 6296 if (worker->desc[0] != '\0') { 6297 if (worker->current_work) 6298 scnprintf(buf + off, size - off, "+%s", 6299 worker->desc); 6300 else 6301 scnprintf(buf + off, size - off, "-%s", 6302 worker->desc); 6303 } 6304 raw_spin_unlock_irq(&pool->lock); 6305 } 6306 } 6307 6308 mutex_unlock(&wq_pool_attach_mutex); 6309 } 6310 6311 #ifdef CONFIG_SMP 6312 6313 /* 6314 * CPU hotplug. 6315 * 6316 * There are two challenges in supporting CPU hotplug. Firstly, there 6317 * are a lot of assumptions on strong associations among work, pwq and 6318 * pool which make migrating pending and scheduled work items very 6319 * difficult to implement without impacting hot paths. Secondly, 6320 * worker pools serve a mix of short, long and very long running work items, 6321 * making blocked draining impractical. 6322 * 6323 * This is solved by allowing a pool to be disassociated from its CPU, 6324 * running as an unbound one, and allowing it to be reattached later if the 6325 * cpu comes back online. 6326 */ 6327 6328 static void unbind_workers(int cpu) 6329 { 6330 struct worker_pool *pool; 6331 struct worker *worker; 6332 6333 for_each_cpu_worker_pool(pool, cpu) { 6334 mutex_lock(&wq_pool_attach_mutex); 6335 raw_spin_lock_irq(&pool->lock); 6336 6337 /* 6338 * We've blocked all attach/detach operations. Make all workers 6339 * unbound and set DISASSOCIATED. Before this, all workers 6340 * must be on the cpu. After this, they may become diasporas. 6341 * And the preemption-disabled sections in their sched callbacks 6342 * are guaranteed to see WORKER_UNBOUND since the code here 6343 * is on the same cpu. 6344 */ 6345 for_each_pool_worker(worker, pool) 6346 worker->flags |= WORKER_UNBOUND; 6347 6348 pool->flags |= POOL_DISASSOCIATED; 6349 6350 /* 6351 * The handling of nr_running in sched callbacks is disabled 6352 * now. Zap nr_running. After this, nr_running stays zero and 6353 * need_more_worker() and keep_working() are always true as 6354 * long as the worklist is not empty. This pool now behaves as 6355 * an unbound (in terms of concurrency management) pool which 6356 * is served by workers tied to the pool. 6357 */ 6358 pool->nr_running = 0; 6359 6360 /* 6361 * With concurrency management just turned off, a busy 6362 * worker blocking could lead to lengthy stalls. Kick off 6363 * unbound chain execution of currently pending work items.
6364 */ 6365 kick_pool(pool); 6366 6367 raw_spin_unlock_irq(&pool->lock); 6368 6369 for_each_pool_worker(worker, pool) 6370 unbind_worker(worker); 6371 6372 mutex_unlock(&wq_pool_attach_mutex); 6373 } 6374 } 6375 6376 /** 6377 * rebind_workers - rebind all workers of a pool to the associated CPU 6378 * @pool: pool of interest 6379 * 6380 * @pool->cpu is coming online. Rebind all workers to the CPU. 6381 */ 6382 static void rebind_workers(struct worker_pool *pool) 6383 { 6384 struct worker *worker; 6385 6386 lockdep_assert_held(&wq_pool_attach_mutex); 6387 6388 /* 6389 * Restore CPU affinity of all workers. As all idle workers should 6390 * be on the run-queue of the associated CPU before any local 6391 * wake-ups for concurrency management happen, restore CPU affinity 6392 * of all workers first and then clear UNBOUND. As we're called 6393 * from CPU_ONLINE, the following shouldn't fail. 6394 */ 6395 for_each_pool_worker(worker, pool) { 6396 kthread_set_per_cpu(worker->task, pool->cpu); 6397 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6398 pool_allowed_cpus(pool)) < 0); 6399 } 6400 6401 raw_spin_lock_irq(&pool->lock); 6402 6403 pool->flags &= ~POOL_DISASSOCIATED; 6404 6405 for_each_pool_worker(worker, pool) { 6406 unsigned int worker_flags = worker->flags; 6407 6408 /* 6409 * We want to clear UNBOUND but can't directly call 6410 * worker_clr_flags() or adjust nr_running. Atomically 6411 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 6412 * @worker will clear REBOUND using worker_clr_flags() when 6413 * it initiates the next execution cycle thus restoring 6414 * concurrency management. Note that when or whether 6415 * @worker clears REBOUND doesn't affect correctness. 6416 * 6417 * WRITE_ONCE() is necessary because @worker->flags may be 6418 * tested without holding any lock in 6419 * wq_worker_running(). Without it, NOT_RUNNING test may 6420 * fail incorrectly leading to premature concurrency 6421 * management operations. 6422 */ 6423 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 6424 worker_flags |= WORKER_REBOUND; 6425 worker_flags &= ~WORKER_UNBOUND; 6426 WRITE_ONCE(worker->flags, worker_flags); 6427 } 6428 6429 raw_spin_unlock_irq(&pool->lock); 6430 } 6431 6432 /** 6433 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 6434 * @pool: unbound pool of interest 6435 * @cpu: the CPU which is coming up 6436 * 6437 * An unbound pool may end up with a cpumask which doesn't have any online 6438 * CPUs. When a worker of such pool get scheduled, the scheduler resets 6439 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 6440 * online CPU before, cpus_allowed of all its workers should be restored. 6441 */ 6442 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 6443 { 6444 static cpumask_t cpumask; 6445 struct worker *worker; 6446 6447 lockdep_assert_held(&wq_pool_attach_mutex); 6448 6449 /* is @cpu allowed for @pool? 
*/ 6450 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 6451 return; 6452 6453 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 6454 6455 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 6456 for_each_pool_worker(worker, pool) 6457 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 6458 } 6459 6460 int workqueue_prepare_cpu(unsigned int cpu) 6461 { 6462 struct worker_pool *pool; 6463 6464 for_each_cpu_worker_pool(pool, cpu) { 6465 if (pool->nr_workers) 6466 continue; 6467 if (!create_worker(pool)) 6468 return -ENOMEM; 6469 } 6470 return 0; 6471 } 6472 6473 int workqueue_online_cpu(unsigned int cpu) 6474 { 6475 struct worker_pool *pool; 6476 struct workqueue_struct *wq; 6477 int pi; 6478 6479 mutex_lock(&wq_pool_mutex); 6480 6481 for_each_pool(pool, pi) { 6482 /* BH pools aren't affected by hotplug */ 6483 if (pool->flags & POOL_BH) 6484 continue; 6485 6486 mutex_lock(&wq_pool_attach_mutex); 6487 if (pool->cpu == cpu) 6488 rebind_workers(pool); 6489 else if (pool->cpu < 0) 6490 restore_unbound_workers_cpumask(pool, cpu); 6491 mutex_unlock(&wq_pool_attach_mutex); 6492 } 6493 6494 /* update pod affinity of unbound workqueues */ 6495 list_for_each_entry(wq, &workqueues, list) { 6496 struct workqueue_attrs *attrs = wq->unbound_attrs; 6497 6498 if (attrs) { 6499 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 6500 int tcpu; 6501 6502 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 6503 wq_update_pod(wq, tcpu, cpu, true); 6504 6505 mutex_lock(&wq->mutex); 6506 wq_update_node_max_active(wq, -1); 6507 mutex_unlock(&wq->mutex); 6508 } 6509 } 6510 6511 mutex_unlock(&wq_pool_mutex); 6512 return 0; 6513 } 6514 6515 int workqueue_offline_cpu(unsigned int cpu) 6516 { 6517 struct workqueue_struct *wq; 6518 6519 /* unbinding per-cpu workers should happen on the local CPU */ 6520 if (WARN_ON(cpu != smp_processor_id())) 6521 return -1; 6522 6523 unbind_workers(cpu); 6524 6525 /* update pod affinity of unbound workqueues */ 6526 mutex_lock(&wq_pool_mutex); 6527 list_for_each_entry(wq, &workqueues, list) { 6528 struct workqueue_attrs *attrs = wq->unbound_attrs; 6529 6530 if (attrs) { 6531 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 6532 int tcpu; 6533 6534 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 6535 wq_update_pod(wq, tcpu, cpu, false); 6536 6537 mutex_lock(&wq->mutex); 6538 wq_update_node_max_active(wq, cpu); 6539 mutex_unlock(&wq->mutex); 6540 } 6541 } 6542 mutex_unlock(&wq_pool_mutex); 6543 6544 return 0; 6545 } 6546 6547 struct work_for_cpu { 6548 struct work_struct work; 6549 long (*fn)(void *); 6550 void *arg; 6551 long ret; 6552 }; 6553 6554 static void work_for_cpu_fn(struct work_struct *work) 6555 { 6556 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 6557 6558 wfc->ret = wfc->fn(wfc->arg); 6559 } 6560 6561 /** 6562 * work_on_cpu_key - run a function in thread context on a particular cpu 6563 * @cpu: the cpu to run on 6564 * @fn: the function to run 6565 * @arg: the function arg 6566 * @key: The lock class key for lock debugging purposes 6567 * 6568 * It is up to the caller to ensure that the cpu doesn't go offline. 6569 * The caller must not hold any locks which would prevent @fn from completing. 6570 * 6571 * Return: The value @fn returns. 
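 *
 * Minimal usage sketch (the callback and variable names are invented; most
 * callers go through the work_on_cpu() wrapper, which supplies the lockdep
 * key):
 *
 *	static long read_foo_state(void *arg)
 *	{
 *		return foo_read_local(arg);
 *	}
 *
 *	ret = work_on_cpu(target_cpu, read_foo_state, foo_dev);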
6572 */ 6573 long work_on_cpu_key(int cpu, long (*fn)(void *), 6574 void *arg, struct lock_class_key *key) 6575 { 6576 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 6577 6578 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 6579 schedule_work_on(cpu, &wfc.work); 6580 flush_work(&wfc.work); 6581 destroy_work_on_stack(&wfc.work); 6582 return wfc.ret; 6583 } 6584 EXPORT_SYMBOL_GPL(work_on_cpu_key); 6585 6586 /** 6587 * work_on_cpu_safe_key - run a function in thread context on a particular cpu 6588 * @cpu: the cpu to run on 6589 * @fn: the function to run 6590 * @arg: the function argument 6591 * @key: The lock class key for lock debugging purposes 6592 * 6593 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 6594 * any locks which would prevent @fn from completing. 6595 * 6596 * Return: The value @fn returns. 6597 */ 6598 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 6599 void *arg, struct lock_class_key *key) 6600 { 6601 long ret = -ENODEV; 6602 6603 cpus_read_lock(); 6604 if (cpu_online(cpu)) 6605 ret = work_on_cpu_key(cpu, fn, arg, key); 6606 cpus_read_unlock(); 6607 return ret; 6608 } 6609 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 6610 #endif /* CONFIG_SMP */ 6611 6612 #ifdef CONFIG_FREEZER 6613 6614 /** 6615 * freeze_workqueues_begin - begin freezing workqueues 6616 * 6617 * Start freezing workqueues. After this function returns, all freezable 6618 * workqueues will queue new works to their inactive_works list instead of 6619 * pool->worklist. 6620 * 6621 * CONTEXT: 6622 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 6623 */ 6624 void freeze_workqueues_begin(void) 6625 { 6626 struct workqueue_struct *wq; 6627 6628 mutex_lock(&wq_pool_mutex); 6629 6630 WARN_ON_ONCE(workqueue_freezing); 6631 workqueue_freezing = true; 6632 6633 list_for_each_entry(wq, &workqueues, list) { 6634 mutex_lock(&wq->mutex); 6635 wq_adjust_max_active(wq); 6636 mutex_unlock(&wq->mutex); 6637 } 6638 6639 mutex_unlock(&wq_pool_mutex); 6640 } 6641 6642 /** 6643 * freeze_workqueues_busy - are freezable workqueues still busy? 6644 * 6645 * Check whether freezing is complete. This function must be called 6646 * between freeze_workqueues_begin() and thaw_workqueues(). 6647 * 6648 * CONTEXT: 6649 * Grabs and releases wq_pool_mutex. 6650 * 6651 * Return: 6652 * %true if some freezable workqueues are still busy. %false if freezing 6653 * is complete. 6654 */ 6655 bool freeze_workqueues_busy(void) 6656 { 6657 bool busy = false; 6658 struct workqueue_struct *wq; 6659 struct pool_workqueue *pwq; 6660 6661 mutex_lock(&wq_pool_mutex); 6662 6663 WARN_ON_ONCE(!workqueue_freezing); 6664 6665 list_for_each_entry(wq, &workqueues, list) { 6666 if (!(wq->flags & WQ_FREEZABLE)) 6667 continue; 6668 /* 6669 * nr_active is monotonically decreasing. It's safe 6670 * to peek without lock. 6671 */ 6672 rcu_read_lock(); 6673 for_each_pwq(pwq, wq) { 6674 WARN_ON_ONCE(pwq->nr_active < 0); 6675 if (pwq->nr_active) { 6676 busy = true; 6677 rcu_read_unlock(); 6678 goto out_unlock; 6679 } 6680 } 6681 rcu_read_unlock(); 6682 } 6683 out_unlock: 6684 mutex_unlock(&wq_pool_mutex); 6685 return busy; 6686 } 6687 6688 /** 6689 * thaw_workqueues - thaw workqueues 6690 * 6691 * Thaw workqueues. Normal queueing is restored and all collected 6692 * frozen works are transferred to their respective pool worklists. 6693 * 6694 * CONTEXT: 6695 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
6696 */ 6697 void thaw_workqueues(void) 6698 { 6699 struct workqueue_struct *wq; 6700 6701 mutex_lock(&wq_pool_mutex); 6702 6703 if (!workqueue_freezing) 6704 goto out_unlock; 6705 6706 workqueue_freezing = false; 6707 6708 /* restore max_active and repopulate worklist */ 6709 list_for_each_entry(wq, &workqueues, list) { 6710 mutex_lock(&wq->mutex); 6711 wq_adjust_max_active(wq); 6712 mutex_unlock(&wq->mutex); 6713 } 6714 6715 out_unlock: 6716 mutex_unlock(&wq_pool_mutex); 6717 } 6718 #endif /* CONFIG_FREEZER */ 6719 6720 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 6721 { 6722 LIST_HEAD(ctxs); 6723 int ret = 0; 6724 struct workqueue_struct *wq; 6725 struct apply_wqattrs_ctx *ctx, *n; 6726 6727 lockdep_assert_held(&wq_pool_mutex); 6728 6729 list_for_each_entry(wq, &workqueues, list) { 6730 if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) 6731 continue; 6732 6733 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 6734 if (IS_ERR(ctx)) { 6735 ret = PTR_ERR(ctx); 6736 break; 6737 } 6738 6739 list_add_tail(&ctx->list, &ctxs); 6740 } 6741 6742 list_for_each_entry_safe(ctx, n, &ctxs, list) { 6743 if (!ret) 6744 apply_wqattrs_commit(ctx); 6745 apply_wqattrs_cleanup(ctx); 6746 } 6747 6748 if (!ret) { 6749 mutex_lock(&wq_pool_attach_mutex); 6750 cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 6751 mutex_unlock(&wq_pool_attach_mutex); 6752 } 6753 return ret; 6754 } 6755 6756 /** 6757 * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask 6758 * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask 6759 * 6760 * This function can be called from cpuset code to provide a set of isolated 6761 * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold 6762 * either cpus_read_lock or cpus_write_lock. 6763 */ 6764 int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask) 6765 { 6766 cpumask_var_t cpumask; 6767 int ret = 0; 6768 6769 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6770 return -ENOMEM; 6771 6772 lockdep_assert_cpus_held(); 6773 mutex_lock(&wq_pool_mutex); 6774 6775 /* Save the current isolated cpumask & export it via sysfs */ 6776 cpumask_copy(wq_isolated_cpumask, exclude_cpumask); 6777 6778 /* 6779 * If the operation fails, it will fall back to 6780 * wq_requested_unbound_cpumask which is initially set to 6781 * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) house keeping mask and rewritten 6782 * by any subsequent write to workqueue/cpumask sysfs file. 
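 *
 * For example, if @exclude_cpumask covers every CPU in
 * wq_requested_unbound_cpumask, the cpumask_andnot() below yields an empty
 * mask and the requested mask is used unchanged rather than leaving the
 * unbound cpumask empty.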
6783 */ 6784 if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask)) 6785 cpumask_copy(cpumask, wq_requested_unbound_cpumask); 6786 if (!cpumask_equal(cpumask, wq_unbound_cpumask)) 6787 ret = workqueue_apply_unbound_cpumask(cpumask); 6788 6789 mutex_unlock(&wq_pool_mutex); 6790 free_cpumask_var(cpumask); 6791 return ret; 6792 } 6793 6794 static int parse_affn_scope(const char *val) 6795 { 6796 int i; 6797 6798 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 6799 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 6800 return i; 6801 } 6802 return -EINVAL; 6803 } 6804 6805 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 6806 { 6807 struct workqueue_struct *wq; 6808 int affn, cpu; 6809 6810 affn = parse_affn_scope(val); 6811 if (affn < 0) 6812 return affn; 6813 if (affn == WQ_AFFN_DFL) 6814 return -EINVAL; 6815 6816 cpus_read_lock(); 6817 mutex_lock(&wq_pool_mutex); 6818 6819 wq_affn_dfl = affn; 6820 6821 list_for_each_entry(wq, &workqueues, list) { 6822 for_each_online_cpu(cpu) { 6823 wq_update_pod(wq, cpu, cpu, true); 6824 } 6825 } 6826 6827 mutex_unlock(&wq_pool_mutex); 6828 cpus_read_unlock(); 6829 6830 return 0; 6831 } 6832 6833 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 6834 { 6835 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 6836 } 6837 6838 static const struct kernel_param_ops wq_affn_dfl_ops = { 6839 .set = wq_affn_dfl_set, 6840 .get = wq_affn_dfl_get, 6841 }; 6842 6843 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 6844 6845 #ifdef CONFIG_SYSFS 6846 /* 6847 * Workqueues with WQ_SYSFS flag set is visible to userland via 6848 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 6849 * following attributes. 6850 * 6851 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 6852 * max_active RW int : maximum number of in-flight work items 6853 * 6854 * Unbound workqueues have the following extra attributes. 
6855 * 6856 * nice RW int : nice value of the workers 6857 * cpumask RW mask : bitmask of allowed CPUs for the workers 6858 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 6859 * affinity_strict RW bool : worker CPU affinity is strict 6860 */ 6861 struct wq_device { 6862 struct workqueue_struct *wq; 6863 struct device dev; 6864 }; 6865 6866 static struct workqueue_struct *dev_to_wq(struct device *dev) 6867 { 6868 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6869 6870 return wq_dev->wq; 6871 } 6872 6873 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 6874 char *buf) 6875 { 6876 struct workqueue_struct *wq = dev_to_wq(dev); 6877 6878 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 6879 } 6880 static DEVICE_ATTR_RO(per_cpu); 6881 6882 static ssize_t max_active_show(struct device *dev, 6883 struct device_attribute *attr, char *buf) 6884 { 6885 struct workqueue_struct *wq = dev_to_wq(dev); 6886 6887 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 6888 } 6889 6890 static ssize_t max_active_store(struct device *dev, 6891 struct device_attribute *attr, const char *buf, 6892 size_t count) 6893 { 6894 struct workqueue_struct *wq = dev_to_wq(dev); 6895 int val; 6896 6897 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 6898 return -EINVAL; 6899 6900 workqueue_set_max_active(wq, val); 6901 return count; 6902 } 6903 static DEVICE_ATTR_RW(max_active); 6904 6905 static struct attribute *wq_sysfs_attrs[] = { 6906 &dev_attr_per_cpu.attr, 6907 &dev_attr_max_active.attr, 6908 NULL, 6909 }; 6910 ATTRIBUTE_GROUPS(wq_sysfs); 6911 6912 static void apply_wqattrs_lock(void) 6913 { 6914 /* CPUs should stay stable across pwq creations and installations */ 6915 cpus_read_lock(); 6916 mutex_lock(&wq_pool_mutex); 6917 } 6918 6919 static void apply_wqattrs_unlock(void) 6920 { 6921 mutex_unlock(&wq_pool_mutex); 6922 cpus_read_unlock(); 6923 } 6924 6925 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 6926 char *buf) 6927 { 6928 struct workqueue_struct *wq = dev_to_wq(dev); 6929 int written; 6930 6931 mutex_lock(&wq->mutex); 6932 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 6933 mutex_unlock(&wq->mutex); 6934 6935 return written; 6936 } 6937 6938 /* prepare workqueue_attrs for sysfs store operations */ 6939 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 6940 { 6941 struct workqueue_attrs *attrs; 6942 6943 lockdep_assert_held(&wq_pool_mutex); 6944 6945 attrs = alloc_workqueue_attrs(); 6946 if (!attrs) 6947 return NULL; 6948 6949 copy_workqueue_attrs(attrs, wq->unbound_attrs); 6950 return attrs; 6951 } 6952 6953 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 6954 const char *buf, size_t count) 6955 { 6956 struct workqueue_struct *wq = dev_to_wq(dev); 6957 struct workqueue_attrs *attrs; 6958 int ret = -ENOMEM; 6959 6960 apply_wqattrs_lock(); 6961 6962 attrs = wq_sysfs_prep_attrs(wq); 6963 if (!attrs) 6964 goto out_unlock; 6965 6966 if (sscanf(buf, "%d", &attrs->nice) == 1 && 6967 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6968 ret = apply_workqueue_attrs_locked(wq, attrs); 6969 else 6970 ret = -EINVAL; 6971 6972 out_unlock: 6973 apply_wqattrs_unlock(); 6974 free_workqueue_attrs(attrs); 6975 return ret ?: count; 6976 } 6977 6978 static ssize_t wq_cpumask_show(struct device *dev, 6979 struct device_attribute *attr, char *buf) 6980 { 6981 struct workqueue_struct *wq = dev_to_wq(dev); 
6982 int written; 6983 6984 mutex_lock(&wq->mutex); 6985 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6986 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6987 mutex_unlock(&wq->mutex); 6988 return written; 6989 } 6990 6991 static ssize_t wq_cpumask_store(struct device *dev, 6992 struct device_attribute *attr, 6993 const char *buf, size_t count) 6994 { 6995 struct workqueue_struct *wq = dev_to_wq(dev); 6996 struct workqueue_attrs *attrs; 6997 int ret = -ENOMEM; 6998 6999 apply_wqattrs_lock(); 7000 7001 attrs = wq_sysfs_prep_attrs(wq); 7002 if (!attrs) 7003 goto out_unlock; 7004 7005 ret = cpumask_parse(buf, attrs->cpumask); 7006 if (!ret) 7007 ret = apply_workqueue_attrs_locked(wq, attrs); 7008 7009 out_unlock: 7010 apply_wqattrs_unlock(); 7011 free_workqueue_attrs(attrs); 7012 return ret ?: count; 7013 } 7014 7015 static ssize_t wq_affn_scope_show(struct device *dev, 7016 struct device_attribute *attr, char *buf) 7017 { 7018 struct workqueue_struct *wq = dev_to_wq(dev); 7019 int written; 7020 7021 mutex_lock(&wq->mutex); 7022 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 7023 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 7024 wq_affn_names[WQ_AFFN_DFL], 7025 wq_affn_names[wq_affn_dfl]); 7026 else 7027 written = scnprintf(buf, PAGE_SIZE, "%s\n", 7028 wq_affn_names[wq->unbound_attrs->affn_scope]); 7029 mutex_unlock(&wq->mutex); 7030 7031 return written; 7032 } 7033 7034 static ssize_t wq_affn_scope_store(struct device *dev, 7035 struct device_attribute *attr, 7036 const char *buf, size_t count) 7037 { 7038 struct workqueue_struct *wq = dev_to_wq(dev); 7039 struct workqueue_attrs *attrs; 7040 int affn, ret = -ENOMEM; 7041 7042 affn = parse_affn_scope(buf); 7043 if (affn < 0) 7044 return affn; 7045 7046 apply_wqattrs_lock(); 7047 attrs = wq_sysfs_prep_attrs(wq); 7048 if (attrs) { 7049 attrs->affn_scope = affn; 7050 ret = apply_workqueue_attrs_locked(wq, attrs); 7051 } 7052 apply_wqattrs_unlock(); 7053 free_workqueue_attrs(attrs); 7054 return ret ?: count; 7055 } 7056 7057 static ssize_t wq_affinity_strict_show(struct device *dev, 7058 struct device_attribute *attr, char *buf) 7059 { 7060 struct workqueue_struct *wq = dev_to_wq(dev); 7061 7062 return scnprintf(buf, PAGE_SIZE, "%d\n", 7063 wq->unbound_attrs->affn_strict); 7064 } 7065 7066 static ssize_t wq_affinity_strict_store(struct device *dev, 7067 struct device_attribute *attr, 7068 const char *buf, size_t count) 7069 { 7070 struct workqueue_struct *wq = dev_to_wq(dev); 7071 struct workqueue_attrs *attrs; 7072 int v, ret = -ENOMEM; 7073 7074 if (sscanf(buf, "%d", &v) != 1) 7075 return -EINVAL; 7076 7077 apply_wqattrs_lock(); 7078 attrs = wq_sysfs_prep_attrs(wq); 7079 if (attrs) { 7080 attrs->affn_strict = (bool)v; 7081 ret = apply_workqueue_attrs_locked(wq, attrs); 7082 } 7083 apply_wqattrs_unlock(); 7084 free_workqueue_attrs(attrs); 7085 return ret ?: count; 7086 } 7087 7088 static struct device_attribute wq_sysfs_unbound_attrs[] = { 7089 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 7090 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 7091 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 7092 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 7093 __ATTR_NULL, 7094 }; 7095 7096 static const struct bus_type wq_subsys = { 7097 .name = "workqueue", 7098 .dev_groups = wq_sysfs_groups, 7099 }; 7100 7101 /** 7102 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 7103 * @cpumask: the cpumask to set 7104 * 7105 * The low-level workqueues cpumask is a global 
cpumask that limits 7106 * the affinity of all unbound workqueues. This function checks @cpumask, 7107 * applies it to all unbound workqueues and updates all of their pwqs. 7108 * 7109 * Return: 0 - Success 7110 * -EINVAL - Invalid @cpumask 7111 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 7112 */ 7113 static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 7114 { 7115 int ret = -EINVAL; 7116 7117 /* 7118 * Not excluding isolated cpus on purpose. 7119 * If the user wishes to include them, we allow that. 7120 */ 7121 cpumask_and(cpumask, cpumask, cpu_possible_mask); 7122 if (!cpumask_empty(cpumask)) { 7123 apply_wqattrs_lock(); 7124 cpumask_copy(wq_requested_unbound_cpumask, cpumask); 7125 if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 7126 ret = 0; 7127 goto out_unlock; 7128 } 7129 7130 ret = workqueue_apply_unbound_cpumask(cpumask); 7131 7132 out_unlock: 7133 apply_wqattrs_unlock(); 7134 } 7135 7136 return ret; 7137 } 7138 7139 static ssize_t __wq_cpumask_show(struct device *dev, 7140 struct device_attribute *attr, char *buf, cpumask_var_t mask) 7141 { 7142 int written; 7143 7144 mutex_lock(&wq_pool_mutex); 7145 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); 7146 mutex_unlock(&wq_pool_mutex); 7147 7148 return written; 7149 } 7150 7151 static ssize_t wq_unbound_cpumask_show(struct device *dev, 7152 struct device_attribute *attr, char *buf) 7153 { 7154 return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask); 7155 } 7156 7157 static ssize_t wq_requested_cpumask_show(struct device *dev, 7158 struct device_attribute *attr, char *buf) 7159 { 7160 return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask); 7161 } 7162 7163 static ssize_t wq_isolated_cpumask_show(struct device *dev, 7164 struct device_attribute *attr, char *buf) 7165 { 7166 return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask); 7167 } 7168 7169 static ssize_t wq_unbound_cpumask_store(struct device *dev, 7170 struct device_attribute *attr, const char *buf, size_t count) 7171 { 7172 cpumask_var_t cpumask; 7173 int ret; 7174 7175 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 7176 return -ENOMEM; 7177 7178 ret = cpumask_parse(buf, cpumask); 7179 if (!ret) 7180 ret = workqueue_set_unbound_cpumask(cpumask); 7181 7182 free_cpumask_var(cpumask); 7183 return ret ?
ret : count; 7184 } 7185 7186 static struct device_attribute wq_sysfs_cpumask_attrs[] = { 7187 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 7188 wq_unbound_cpumask_store), 7189 __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL), 7190 __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL), 7191 __ATTR_NULL, 7192 }; 7193 7194 static int __init wq_sysfs_init(void) 7195 { 7196 struct device *dev_root; 7197 int err; 7198 7199 err = subsys_virtual_register(&wq_subsys, NULL); 7200 if (err) 7201 return err; 7202 7203 dev_root = bus_get_dev_root(&wq_subsys); 7204 if (dev_root) { 7205 struct device_attribute *attr; 7206 7207 for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) { 7208 err = device_create_file(dev_root, attr); 7209 if (err) 7210 break; 7211 } 7212 put_device(dev_root); 7213 } 7214 return err; 7215 } 7216 core_initcall(wq_sysfs_init); 7217 7218 static void wq_device_release(struct device *dev) 7219 { 7220 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 7221 7222 kfree(wq_dev); 7223 } 7224 7225 /** 7226 * workqueue_sysfs_register - make a workqueue visible in sysfs 7227 * @wq: the workqueue to register 7228 * 7229 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 7230 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 7231 * which is the preferred method. 7232 * 7233 * Workqueue user should use this function directly iff it wants to apply 7234 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 7235 * apply_workqueue_attrs() may race against userland updating the 7236 * attributes. 7237 * 7238 * Return: 0 on success, -errno on failure. 7239 */ 7240 int workqueue_sysfs_register(struct workqueue_struct *wq) 7241 { 7242 struct wq_device *wq_dev; 7243 int ret; 7244 7245 /* 7246 * Adjusting max_active breaks ordering guarantee. Disallow exposing 7247 * ordered workqueues. 7248 */ 7249 if (WARN_ON(wq->flags & __WQ_ORDERED)) 7250 return -EINVAL; 7251 7252 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 7253 if (!wq_dev) 7254 return -ENOMEM; 7255 7256 wq_dev->wq = wq; 7257 wq_dev->dev.bus = &wq_subsys; 7258 wq_dev->dev.release = wq_device_release; 7259 dev_set_name(&wq_dev->dev, "%s", wq->name); 7260 7261 /* 7262 * unbound_attrs are created separately. Suppress uevent until 7263 * everything is ready. 7264 */ 7265 dev_set_uevent_suppress(&wq_dev->dev, true); 7266 7267 ret = device_register(&wq_dev->dev); 7268 if (ret) { 7269 put_device(&wq_dev->dev); 7270 wq->wq_dev = NULL; 7271 return ret; 7272 } 7273 7274 if (wq->flags & WQ_UNBOUND) { 7275 struct device_attribute *attr; 7276 7277 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 7278 ret = device_create_file(&wq_dev->dev, attr); 7279 if (ret) { 7280 device_unregister(&wq_dev->dev); 7281 wq->wq_dev = NULL; 7282 return ret; 7283 } 7284 } 7285 } 7286 7287 dev_set_uevent_suppress(&wq_dev->dev, false); 7288 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 7289 return 0; 7290 } 7291 7292 /** 7293 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 7294 * @wq: the workqueue to unregister 7295 * 7296 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
7297 */ 7298 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 7299 { 7300 struct wq_device *wq_dev = wq->wq_dev; 7301 7302 if (!wq->wq_dev) 7303 return; 7304 7305 wq->wq_dev = NULL; 7306 device_unregister(&wq_dev->dev); 7307 } 7308 #else /* CONFIG_SYSFS */ 7309 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 7310 #endif /* CONFIG_SYSFS */ 7311 7312 /* 7313 * Workqueue watchdog. 7314 * 7315 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 7316 * flush dependency, a concurrency managed work item which stays RUNNING 7317 * indefinitely. Workqueue stalls can be very difficult to debug as the 7318 * usual warning mechanisms don't trigger and internal workqueue state is 7319 * largely opaque. 7320 * 7321 * Workqueue watchdog monitors all worker pools periodically and dumps 7322 * state if some pools failed to make forward progress for a while where 7323 * forward progress is defined as the first item on ->worklist changing. 7324 * 7325 * This mechanism is controlled through the kernel parameter 7326 * "workqueue.watchdog_thresh" which can be updated at runtime through the 7327 * corresponding sysfs parameter file. 7328 */ 7329 #ifdef CONFIG_WQ_WATCHDOG 7330 7331 static unsigned long wq_watchdog_thresh = 30; 7332 static struct timer_list wq_watchdog_timer; 7333 7334 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 7335 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 7336 7337 /* 7338 * Show workers that might prevent the processing of pending work items. 7339 * The only candidates are CPU-bound workers in the running state. 7340 * Pending work items should be handled by another idle worker 7341 * in all other situations. 7342 */ 7343 static void show_cpu_pool_hog(struct worker_pool *pool) 7344 { 7345 struct worker *worker; 7346 unsigned long irq_flags; 7347 int bkt; 7348 7349 raw_spin_lock_irqsave(&pool->lock, irq_flags); 7350 7351 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 7352 if (task_is_running(worker->task)) { 7353 /* 7354 * Defer printing to avoid deadlocks in console 7355 * drivers that queue work while holding locks 7356 * also taken in their write paths. 
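 *
 * printk_deferred_enter()/printk_deferred_exit() must stay balanced:
 * between the two, printk output is queued for deferred flushing rather
 * than pushed straight to the consoles, so sched_show_task() below can
 * dump the worker's stack without re-entering a console driver that
 * might take the same locks.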
7357 */ 7358 printk_deferred_enter(); 7359 7360 pr_info("pool %d:\n", pool->id); 7361 sched_show_task(worker->task); 7362 7363 printk_deferred_exit(); 7364 } 7365 } 7366 7367 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); 7368 } 7369 7370 static void show_cpu_pools_hogs(void) 7371 { 7372 struct worker_pool *pool; 7373 int pi; 7374 7375 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 7376 7377 rcu_read_lock(); 7378 7379 for_each_pool(pool, pi) { 7380 if (pool->cpu_stall) 7381 show_cpu_pool_hog(pool); 7382 7383 } 7384 7385 rcu_read_unlock(); 7386 } 7387 7388 static void wq_watchdog_reset_touched(void) 7389 { 7390 int cpu; 7391 7392 wq_watchdog_touched = jiffies; 7393 for_each_possible_cpu(cpu) 7394 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 7395 } 7396 7397 static void wq_watchdog_timer_fn(struct timer_list *unused) 7398 { 7399 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 7400 bool lockup_detected = false; 7401 bool cpu_pool_stall = false; 7402 unsigned long now = jiffies; 7403 struct worker_pool *pool; 7404 int pi; 7405 7406 if (!thresh) 7407 return; 7408 7409 rcu_read_lock(); 7410 7411 for_each_pool(pool, pi) { 7412 unsigned long pool_ts, touched, ts; 7413 7414 pool->cpu_stall = false; 7415 if (list_empty(&pool->worklist)) 7416 continue; 7417 7418 /* 7419 * If a virtual machine is stopped by the host it can look to 7420 * the watchdog like a stall. 7421 */ 7422 kvm_check_and_clear_guest_paused(); 7423 7424 /* get the latest of pool and touched timestamps */ 7425 if (pool->cpu >= 0) 7426 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 7427 else 7428 touched = READ_ONCE(wq_watchdog_touched); 7429 pool_ts = READ_ONCE(pool->watchdog_ts); 7430 7431 if (time_after(pool_ts, touched)) 7432 ts = pool_ts; 7433 else 7434 ts = touched; 7435 7436 /* did we stall? 
*/ 7437 if (time_after(now, ts + thresh)) { 7438 lockup_detected = true; 7439 if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { 7440 pool->cpu_stall = true; 7441 cpu_pool_stall = true; 7442 } 7443 pr_emerg("BUG: workqueue lockup - pool"); 7444 pr_cont_pool_info(pool); 7445 pr_cont(" stuck for %us!\n", 7446 jiffies_to_msecs(now - pool_ts) / 1000); 7447 } 7448 7449 7450 } 7451 7452 rcu_read_unlock(); 7453 7454 if (lockup_detected) 7455 show_all_workqueues(); 7456 7457 if (cpu_pool_stall) 7458 show_cpu_pools_hogs(); 7459 7460 wq_watchdog_reset_touched(); 7461 mod_timer(&wq_watchdog_timer, jiffies + thresh); 7462 } 7463 7464 notrace void wq_watchdog_touch(int cpu) 7465 { 7466 if (cpu >= 0) 7467 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 7468 7469 wq_watchdog_touched = jiffies; 7470 } 7471 7472 static void wq_watchdog_set_thresh(unsigned long thresh) 7473 { 7474 wq_watchdog_thresh = 0; 7475 del_timer_sync(&wq_watchdog_timer); 7476 7477 if (thresh) { 7478 wq_watchdog_thresh = thresh; 7479 wq_watchdog_reset_touched(); 7480 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 7481 } 7482 } 7483 7484 static int wq_watchdog_param_set_thresh(const char *val, 7485 const struct kernel_param *kp) 7486 { 7487 unsigned long thresh; 7488 int ret; 7489 7490 ret = kstrtoul(val, 0, &thresh); 7491 if (ret) 7492 return ret; 7493 7494 if (system_wq) 7495 wq_watchdog_set_thresh(thresh); 7496 else 7497 wq_watchdog_thresh = thresh; 7498 7499 return 0; 7500 } 7501 7502 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 7503 .set = wq_watchdog_param_set_thresh, 7504 .get = param_get_ulong, 7505 }; 7506 7507 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 7508 0644); 7509 7510 static void wq_watchdog_init(void) 7511 { 7512 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 7513 wq_watchdog_set_thresh(wq_watchdog_thresh); 7514 } 7515 7516 #else /* CONFIG_WQ_WATCHDOG */ 7517 7518 static inline void wq_watchdog_init(void) { } 7519 7520 #endif /* CONFIG_WQ_WATCHDOG */ 7521 7522 static void bh_pool_kick_normal(struct irq_work *irq_work) 7523 { 7524 raise_softirq_irqoff(TASKLET_SOFTIRQ); 7525 } 7526 7527 static void bh_pool_kick_highpri(struct irq_work *irq_work) 7528 { 7529 raise_softirq_irqoff(HI_SOFTIRQ); 7530 } 7531 7532 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 7533 { 7534 if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 7535 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 7536 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 7537 return; 7538 } 7539 7540 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 7541 } 7542 7543 static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice) 7544 { 7545 BUG_ON(init_worker_pool(pool)); 7546 pool->cpu = cpu; 7547 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 7548 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 7549 pool->attrs->nice = nice; 7550 pool->attrs->affn_strict = true; 7551 pool->node = cpu_to_node(cpu); 7552 7553 /* alloc pool ID */ 7554 mutex_lock(&wq_pool_mutex); 7555 BUG_ON(worker_pool_assign_id(pool)); 7556 mutex_unlock(&wq_pool_mutex); 7557 } 7558 7559 /** 7560 * workqueue_init_early - early init for workqueue subsystem 7561 * 7562 * This is the first step of three-staged workqueue subsystem initialization and 7563 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 7564 * up. 
It sets up all the data structures and system workqueues and allows early 7565 * boot code to create workqueues and queue/cancel work items. Actual work item 7566 * execution starts only after kthreads can be created and scheduled right 7567 * before early initcalls. 7568 */ 7569 void __init workqueue_init_early(void) 7570 { 7571 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 7572 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 7573 void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal, 7574 bh_pool_kick_highpri }; 7575 int i, cpu; 7576 7577 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 7578 7579 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 7580 BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL)); 7581 BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL)); 7582 7583 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 7584 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 7585 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 7586 if (!cpumask_empty(&wq_cmdline_cpumask)) 7587 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 7588 7589 cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask); 7590 7591 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 7592 7593 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 7594 BUG_ON(!wq_update_pod_attrs_buf); 7595 7596 /* 7597 * If nohz_full is enabled, set power efficient workqueue as unbound. 7598 * This allows workqueue items to be moved to HK CPUs. 7599 */ 7600 if (housekeeping_enabled(HK_TYPE_TICK)) 7601 wq_power_efficient = true; 7602 7603 /* initialize WQ_AFFN_SYSTEM pods */ 7604 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 7605 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 7606 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 7607 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 7608 7609 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 7610 7611 pt->nr_pods = 1; 7612 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 7613 pt->pod_node[0] = NUMA_NO_NODE; 7614 pt->cpu_pod[0] = 0; 7615 7616 /* initialize BH and CPU pools */ 7617 for_each_possible_cpu(cpu) { 7618 struct worker_pool *pool; 7619 7620 i = 0; 7621 for_each_bh_worker_pool(pool, cpu) { 7622 init_cpu_worker_pool(pool, cpu, std_nice[i]); 7623 pool->flags |= POOL_BH; 7624 init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]); 7625 i++; 7626 } 7627 7628 i = 0; 7629 for_each_cpu_worker_pool(pool, cpu) 7630 init_cpu_worker_pool(pool, cpu, std_nice[i++]); 7631 } 7632 7633 /* create default unbound and ordered wq attrs */ 7634 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 7635 struct workqueue_attrs *attrs; 7636 7637 BUG_ON(!(attrs = alloc_workqueue_attrs())); 7638 attrs->nice = std_nice[i]; 7639 unbound_std_wq_attrs[i] = attrs; 7640 7641 /* 7642 * An ordered wq should have only one pwq as ordering is 7643 * guaranteed by max_active which is enforced by pwqs. 
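 *
 * (This is what alloc_ordered_workqueue() ultimately relies on: it asks
 * for an unbound, __WQ_ORDERED workqueue with max_active of 1, leaving a
 * single pwq that executes items in queueing order.)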
7644 */ 7645 BUG_ON(!(attrs = alloc_workqueue_attrs())); 7646 attrs->nice = std_nice[i]; 7647 attrs->ordered = true; 7648 ordered_wq_attrs[i] = attrs; 7649 } 7650 7651 system_wq = alloc_workqueue("events", 0, 0); 7652 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 7653 system_long_wq = alloc_workqueue("events_long", 0, 0); 7654 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 7655 WQ_MAX_ACTIVE); 7656 system_freezable_wq = alloc_workqueue("events_freezable", 7657 WQ_FREEZABLE, 0); 7658 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 7659 WQ_POWER_EFFICIENT, 0); 7660 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient", 7661 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 7662 0); 7663 system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0); 7664 system_bh_highpri_wq = alloc_workqueue("events_bh_highpri", 7665 WQ_BH | WQ_HIGHPRI, 0); 7666 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 7667 !system_unbound_wq || !system_freezable_wq || 7668 !system_power_efficient_wq || 7669 !system_freezable_power_efficient_wq || 7670 !system_bh_wq || !system_bh_highpri_wq); 7671 } 7672 7673 static void __init wq_cpu_intensive_thresh_init(void) 7674 { 7675 unsigned long thresh; 7676 unsigned long bogo; 7677 7678 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 7679 BUG_ON(IS_ERR(pwq_release_worker)); 7680 7681 /* if the user set it to a specific value, keep it */ 7682 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 7683 return; 7684 7685 /* 7686 * The default of 10ms is derived from the fact that most modern (as of 7687 * 2023) processors can do a lot in 10ms and that it's just below what 7688 * most consider human-perceivable. However, the kernel also runs on a 7689 * lot slower CPUs including microcontrollers where the threshold is way 7690 * too low. 7691 * 7692 * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 7693 * This is by no means accurate but it doesn't have to be. The mechanism 7694 * is still useful even when the threshold is fully scaled up. Also, as 7695 * the reports would usually be applicable to everyone, some machines 7696 * operating on longer thresholds won't significantly diminish their 7697 * usefulness. 7698 */ 7699 thresh = 10 * USEC_PER_MSEC; 7700 7701 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 7702 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 7703 if (bogo < 4000) 7704 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 7705 7706 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 7707 loops_per_jiffy, bogo, thresh); 7708 7709 wq_cpu_intensive_thresh_us = thresh; 7710 } 7711 7712 /** 7713 * workqueue_init - bring workqueue subsystem fully online 7714 * 7715 * This is the second step of three-staged workqueue subsystem initialization 7716 * and invoked as soon as kthreads can be created and scheduled. Workqueues have 7717 * been created and work items queued on them, but there are no kworkers 7718 * executing the work items yet. Populate the worker pools with the initial 7719 * workers and enable future kworker creations. 7720 */ 7721 void __init workqueue_init(void) 7722 { 7723 struct workqueue_struct *wq; 7724 struct worker_pool *pool; 7725 int cpu, bkt; 7726 7727 wq_cpu_intensive_thresh_init(); 7728 7729 mutex_lock(&wq_pool_mutex); 7730 7731 /* 7732 * Per-cpu pools created earlier could be missing node hint. Fix them 7733 * up. 
Also, create a rescuer for workqueues that requested it. 7734 */ 7735 for_each_possible_cpu(cpu) { 7736 for_each_bh_worker_pool(pool, cpu) 7737 pool->node = cpu_to_node(cpu); 7738 for_each_cpu_worker_pool(pool, cpu) 7739 pool->node = cpu_to_node(cpu); 7740 } 7741 7742 list_for_each_entry(wq, &workqueues, list) { 7743 WARN(init_rescuer(wq), 7744 "workqueue: failed to create early rescuer for %s", 7745 wq->name); 7746 } 7747 7748 mutex_unlock(&wq_pool_mutex); 7749 7750 /* 7751 * Create the initial workers. A BH pool has one pseudo worker that 7752 * represents the shared BH execution context and thus doesn't get 7753 * affected by hotplug events. Create the BH pseudo workers for all 7754 * possible CPUs here. 7755 */ 7756 for_each_possible_cpu(cpu) 7757 for_each_bh_worker_pool(pool, cpu) 7758 BUG_ON(!create_worker(pool)); 7759 7760 for_each_online_cpu(cpu) { 7761 for_each_cpu_worker_pool(pool, cpu) { 7762 pool->flags &= ~POOL_DISASSOCIATED; 7763 BUG_ON(!create_worker(pool)); 7764 } 7765 } 7766 7767 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 7768 BUG_ON(!create_worker(pool)); 7769 7770 wq_online = true; 7771 wq_watchdog_init(); 7772 } 7773 7774 /* 7775 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 7776 * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique 7777 * and consecutive pod ID. The rest of @pt is initialized accordingly. 7778 */ 7779 static void __init init_pod_type(struct wq_pod_type *pt, 7780 bool (*cpus_share_pod)(int, int)) 7781 { 7782 int cur, pre, cpu, pod; 7783 7784 pt->nr_pods = 0; 7785 7786 /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 7787 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 7788 BUG_ON(!pt->cpu_pod); 7789 7790 for_each_possible_cpu(cur) { 7791 for_each_possible_cpu(pre) { 7792 if (pre >= cur) { 7793 pt->cpu_pod[cur] = pt->nr_pods++; 7794 break; 7795 } 7796 if (cpus_share_pod(cur, pre)) { 7797 pt->cpu_pod[cur] = pt->cpu_pod[pre]; 7798 break; 7799 } 7800 } 7801 } 7802 7803 /* init the rest to match @pt->cpu_pod[] */ 7804 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 7805 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 7806 BUG_ON(!pt->pod_cpus || !pt->pod_node); 7807 7808 for (pod = 0; pod < pt->nr_pods; pod++) 7809 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 7810 7811 for_each_possible_cpu(cpu) { 7812 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 7813 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 7814 } 7815 } 7816 7817 static bool __init cpus_dont_share(int cpu0, int cpu1) 7818 { 7819 return false; 7820 } 7821 7822 static bool __init cpus_share_smt(int cpu0, int cpu1) 7823 { 7824 #ifdef CONFIG_SCHED_SMT 7825 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 7826 #else 7827 return false; 7828 #endif 7829 } 7830 7831 static bool __init cpus_share_numa(int cpu0, int cpu1) 7832 { 7833 return cpu_to_node(cpu0) == cpu_to_node(cpu1); 7834 } 7835 7836 /** 7837 * workqueue_init_topology - initialize CPU pods for unbound workqueues 7838 * 7839 * This is the third step of three-staged workqueue subsystem initialization and 7840 * invoked after SMP and topology information are fully initialized. It 7841 * initializes the unbound CPU pods accordingly. 
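 *
 * As a worked example of init_pod_type() above: on a hypothetical 4-CPU
 * machine where CPUs 0-1 and CPUs 2-3 each share a last-level cache, the
 * WQ_AFFN_CACHE type ends up with nr_pods == 2, cpu_pod[] == { 0, 0, 1, 1 },
 * pod_cpus[0] == CPUs 0-1 and pod_cpus[1] == CPUs 2-3.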
7842 */ 7843 void __init workqueue_init_topology(void) 7844 { 7845 struct workqueue_struct *wq; 7846 int cpu; 7847 7848 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 7849 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 7850 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 7851 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 7852 7853 wq_topo_initialized = true; 7854 7855 mutex_lock(&wq_pool_mutex); 7856 7857 /* 7858 * Workqueues allocated earlier would have all CPUs sharing the default 7859 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 7860 * combinations to apply per-pod sharing. 7861 */ 7862 list_for_each_entry(wq, &workqueues, list) { 7863 for_each_online_cpu(cpu) 7864 wq_update_pod(wq, cpu, cpu, true); 7865 if (wq->flags & WQ_UNBOUND) { 7866 mutex_lock(&wq->mutex); 7867 wq_update_node_max_active(wq, -1); 7868 mutex_unlock(&wq->mutex); 7869 } 7870 } 7871 7872 mutex_unlock(&wq_pool_mutex); 7873 } 7874 7875 void __warn_flushing_systemwide_wq(void) 7876 { 7877 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 7878 dump_stack(); 7879 } 7880 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 7881 7882 static int __init workqueue_unbound_cpus_setup(char *str) 7883 { 7884 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 7885 cpumask_clear(&wq_cmdline_cpumask); 7886 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 7887 } 7888 7889 return 1; 7890 } 7891 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 7892
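/*
 * Illustrative usage sketch, not part of the original file. It ties the
 * pieces above together: the boot parameter registered by
 * workqueue_unbound_cpus_setup() takes a CPU list (cpulist_parse()), while
 * the sysfs "cpumask" file created in wq_sysfs_init() takes a hex mask
 * (cpumask_parse()), e.g.:
 *
 *	workqueue.unbound_cpus=0-3,8
 *	echo f > /sys/devices/virtual/workqueue/cpumask
 *
 * The sketch below shows how a driver might create an unbound, sysfs-visible
 * workqueue that such settings then apply to. All example_* names are
 * hypothetical and the block is compiled out.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	pr_info("example work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* WQ_SYSFS makes the wq visible under /sys/bus/workqueue/devices/ */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* flushes pending work and removes the sysfs node as well */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif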