// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};
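
/*
 * Illustrative timing example (derived from the constants above, assuming
 * common HZ values for exposition): at HZ=1000, MAYDAY_INITIAL_TIMEOUT
 * evaluates to HZ/100 = 10 ticks (10ms) and MAYDAY_INTERVAL to HZ/10 =
 * 100 ticks (100ms); at HZ=100, HZ/100 = 1 falls below the two-tick
 * minimum, so the initial timeout clamps to 2 ticks (20ms) while the
 * interval stays at 100ms.  Idle workers in excess of the
 * MAX_IDLE_WORKERS_RATIO limit stick around for IDLE_WORKER_TIMEOUT =
 * 300 * HZ ticks, i.e. five minutes, before being culled.
 */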

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * K: Only modified by worker while holding pool->lock.  Can be safely read
 *    by self, while holding pool->lock or from IRQ context if %current is
 *    the kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works.  Some of them are ready to
	 * run in pool->worklist or worker->scheduled.  Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active.  For non-barrier work item, it
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
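 *
 * Illustrative usage sketch (an assumption for exposition, not taken from
 * this file; "ex_wq", "ex_work" and "ex_work_fn" are made-up names):
 * drivers normally obtain such a workqueue through the public API rather
 * than touching this structure directly:
 *
 *	static void ex_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(ex_work, ex_work_fn);
 *	static struct workqueue_struct *ex_wq;
 *
 *	ex_wq = alloc_workqueue("ex_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!ex_wq)
 *		return -ENOMEM;
 *	queue_work(ex_wq, &ex_work);
 *	...
 *	destroy_workqueue(ex_wq);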
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};

static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet?
					 */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
426 */ 427 static struct kthread_worker *pwq_release_worker __ro_after_init; 428 429 struct workqueue_struct *system_wq __ro_after_init; 430 EXPORT_SYMBOL(system_wq); 431 struct workqueue_struct *system_highpri_wq __ro_after_init; 432 EXPORT_SYMBOL_GPL(system_highpri_wq); 433 struct workqueue_struct *system_long_wq __ro_after_init; 434 EXPORT_SYMBOL_GPL(system_long_wq); 435 struct workqueue_struct *system_unbound_wq __ro_after_init; 436 EXPORT_SYMBOL_GPL(system_unbound_wq); 437 struct workqueue_struct *system_freezable_wq __ro_after_init; 438 EXPORT_SYMBOL_GPL(system_freezable_wq); 439 struct workqueue_struct *system_power_efficient_wq __ro_after_init; 440 EXPORT_SYMBOL_GPL(system_power_efficient_wq); 441 struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init; 442 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); 443 444 static int worker_thread(void *__worker); 445 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 446 static void show_pwq(struct pool_workqueue *pwq); 447 static void show_one_worker_pool(struct worker_pool *pool); 448 449 #define CREATE_TRACE_POINTS 450 #include <trace/events/workqueue.h> 451 452 #define assert_rcu_or_pool_mutex() \ 453 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 454 !lockdep_is_held(&wq_pool_mutex), \ 455 "RCU or wq_pool_mutex should be held") 456 457 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 458 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 459 !lockdep_is_held(&wq->mutex) && \ 460 !lockdep_is_held(&wq_pool_mutex), \ 461 "RCU, wq->mutex or wq_pool_mutex should be held") 462 463 #define for_each_cpu_worker_pool(pool, cpu) \ 464 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 465 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 466 (pool)++) 467 468 /** 469 * for_each_pool - iterate through all worker_pools in the system 470 * @pool: iteration cursor 471 * @pi: integer used for iteration 472 * 473 * This must be called either with wq_pool_mutex held or RCU read 474 * locked. If the pool needs to be used beyond the locking in effect, the 475 * caller is responsible for guaranteeing that the pool stays online. 476 * 477 * The if/else clause exists only for the lockdep assertion and can be 478 * ignored. 479 */ 480 #define for_each_pool(pool, pi) \ 481 idr_for_each_entry(&worker_pool_idr, pool, pi) \ 482 if (({ assert_rcu_or_pool_mutex(); false; })) { } \ 483 else 484 485 /** 486 * for_each_pool_worker - iterate through all workers of a worker_pool 487 * @worker: iteration cursor 488 * @pool: worker_pool to iterate workers of 489 * 490 * This must be called with wq_pool_attach_mutex. 491 * 492 * The if/else clause exists only for the lockdep assertion and can be 493 * ignored. 494 */ 495 #define for_each_pool_worker(worker, pool) \ 496 list_for_each_entry((worker), &(pool)->workers, node) \ 497 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \ 498 else 499 500 /** 501 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 502 * @pwq: iteration cursor 503 * @wq: the target workqueue 504 * 505 * This must be called either with wq->mutex held or RCU read locked. 506 * If the pwq needs to be used beyond the locking in effect, the caller is 507 * responsible for guaranteeing that the pwq stays online. 508 * 509 * The if/else clause exists only for the lockdep assertion and can be 510 * ignored. 
511 */ 512 #define for_each_pwq(pwq, wq) \ 513 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ 514 lockdep_is_held(&(wq->mutex))) 515 516 #ifdef CONFIG_DEBUG_OBJECTS_WORK 517 518 static const struct debug_obj_descr work_debug_descr; 519 520 static void *work_debug_hint(void *addr) 521 { 522 return ((struct work_struct *) addr)->func; 523 } 524 525 static bool work_is_static_object(void *addr) 526 { 527 struct work_struct *work = addr; 528 529 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); 530 } 531 532 /* 533 * fixup_init is called when: 534 * - an active object is initialized 535 */ 536 static bool work_fixup_init(void *addr, enum debug_obj_state state) 537 { 538 struct work_struct *work = addr; 539 540 switch (state) { 541 case ODEBUG_STATE_ACTIVE: 542 cancel_work_sync(work); 543 debug_object_init(work, &work_debug_descr); 544 return true; 545 default: 546 return false; 547 } 548 } 549 550 /* 551 * fixup_free is called when: 552 * - an active object is freed 553 */ 554 static bool work_fixup_free(void *addr, enum debug_obj_state state) 555 { 556 struct work_struct *work = addr; 557 558 switch (state) { 559 case ODEBUG_STATE_ACTIVE: 560 cancel_work_sync(work); 561 debug_object_free(work, &work_debug_descr); 562 return true; 563 default: 564 return false; 565 } 566 } 567 568 static const struct debug_obj_descr work_debug_descr = { 569 .name = "work_struct", 570 .debug_hint = work_debug_hint, 571 .is_static_object = work_is_static_object, 572 .fixup_init = work_fixup_init, 573 .fixup_free = work_fixup_free, 574 }; 575 576 static inline void debug_work_activate(struct work_struct *work) 577 { 578 debug_object_activate(work, &work_debug_descr); 579 } 580 581 static inline void debug_work_deactivate(struct work_struct *work) 582 { 583 debug_object_deactivate(work, &work_debug_descr); 584 } 585 586 void __init_work(struct work_struct *work, int onstack) 587 { 588 if (onstack) 589 debug_object_init_on_stack(work, &work_debug_descr); 590 else 591 debug_object_init(work, &work_debug_descr); 592 } 593 EXPORT_SYMBOL_GPL(__init_work); 594 595 void destroy_work_on_stack(struct work_struct *work) 596 { 597 debug_object_free(work, &work_debug_descr); 598 } 599 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 600 601 void destroy_delayed_work_on_stack(struct delayed_work *work) 602 { 603 destroy_timer_on_stack(&work->timer); 604 debug_object_free(&work->work, &work_debug_descr); 605 } 606 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); 607 608 #else 609 static inline void debug_work_activate(struct work_struct *work) { } 610 static inline void debug_work_deactivate(struct work_struct *work) { } 611 #endif 612 613 /** 614 * worker_pool_assign_id - allocate ID and assign it to @pool 615 * @pool: the pool pointer of interest 616 * 617 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned 618 * successfully, -errno on failure. 
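 *
 * Sketch of the underlying call (assuming stock IDR semantics):
 * idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, GFP_KERNEL)
 * hands back the smallest free ID below WORK_OFFQ_POOL_NONE, or a negative
 * errno such as -ENOSPC once that range is exhausted, which keeps every
 * valid pool ID encodable in the off-queue bits of work->data.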
619 */ 620 static int worker_pool_assign_id(struct worker_pool *pool) 621 { 622 int ret; 623 624 lockdep_assert_held(&wq_pool_mutex); 625 626 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, 627 GFP_KERNEL); 628 if (ret >= 0) { 629 pool->id = ret; 630 return 0; 631 } 632 return ret; 633 } 634 635 static unsigned int work_color_to_flags(int color) 636 { 637 return color << WORK_STRUCT_COLOR_SHIFT; 638 } 639 640 static int get_work_color(unsigned long work_data) 641 { 642 return (work_data >> WORK_STRUCT_COLOR_SHIFT) & 643 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 644 } 645 646 static int work_next_color(int color) 647 { 648 return (color + 1) % WORK_NR_COLORS; 649 } 650 651 /* 652 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 653 * contain the pointer to the queued pwq. Once execution starts, the flag 654 * is cleared and the high bits contain OFFQ flags and pool ID. 655 * 656 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 657 * and clear_work_data() can be used to set the pwq, pool or clear 658 * work->data. These functions should only be called while the work is 659 * owned - ie. while the PENDING bit is set. 660 * 661 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 662 * corresponding to a work. Pool is available once the work has been 663 * queued anywhere after initialization until it is sync canceled. pwq is 664 * available only while the work item is queued. 665 * 666 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 667 * canceled. While being canceled, a work item may have its PENDING set 668 * but stay off timer and worklist for arbitrarily long and nobody should 669 * try to steal the PENDING bit. 670 */ 671 static inline void set_work_data(struct work_struct *work, unsigned long data, 672 unsigned long flags) 673 { 674 WARN_ON_ONCE(!work_pending(work)); 675 atomic_long_set(&work->data, data | flags | work_static(work)); 676 } 677 678 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 679 unsigned long extra_flags) 680 { 681 set_work_data(work, (unsigned long)pwq, 682 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 683 } 684 685 static void set_work_pool_and_keep_pending(struct work_struct *work, 686 int pool_id) 687 { 688 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 689 WORK_STRUCT_PENDING); 690 } 691 692 static void set_work_pool_and_clear_pending(struct work_struct *work, 693 int pool_id) 694 { 695 /* 696 * The following wmb is paired with the implied mb in 697 * test_and_set_bit(PENDING) and ensures all updates to @work made 698 * here are visible to and precede any updates by the next PENDING 699 * owner. 700 */ 701 smp_wmb(); 702 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 703 /* 704 * The following mb guarantees that previous clear of a PENDING bit 705 * will not be reordered with any speculative LOADS or STORES from 706 * work->current_func, which is executed afterwards. This possible 707 * reordering can lead to a missed execution on attempt to queue 708 * the same @work. E.g. 
consider this case: 709 * 710 * CPU#0 CPU#1 711 * ---------------------------- -------------------------------- 712 * 713 * 1 STORE event_indicated 714 * 2 queue_work_on() { 715 * 3 test_and_set_bit(PENDING) 716 * 4 } set_..._and_clear_pending() { 717 * 5 set_work_data() # clear bit 718 * 6 smp_mb() 719 * 7 work->current_func() { 720 * 8 LOAD event_indicated 721 * } 722 * 723 * Without an explicit full barrier speculative LOAD on line 8 can 724 * be executed before CPU#0 does STORE on line 1. If that happens, 725 * CPU#0 observes the PENDING bit is still set and new execution of 726 * a @work is not queued in a hope, that CPU#1 will eventually 727 * finish the queued @work. Meanwhile CPU#1 does not see 728 * event_indicated is set, because speculative LOAD was executed 729 * before actual STORE. 730 */ 731 smp_mb(); 732 } 733 734 static void clear_work_data(struct work_struct *work) 735 { 736 smp_wmb(); /* see set_work_pool_and_clear_pending() */ 737 set_work_data(work, WORK_STRUCT_NO_POOL, 0); 738 } 739 740 static inline struct pool_workqueue *work_struct_pwq(unsigned long data) 741 { 742 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); 743 } 744 745 static struct pool_workqueue *get_work_pwq(struct work_struct *work) 746 { 747 unsigned long data = atomic_long_read(&work->data); 748 749 if (data & WORK_STRUCT_PWQ) 750 return work_struct_pwq(data); 751 else 752 return NULL; 753 } 754 755 /** 756 * get_work_pool - return the worker_pool a given work was associated with 757 * @work: the work item of interest 758 * 759 * Pools are created and destroyed under wq_pool_mutex, and allows read 760 * access under RCU read lock. As such, this function should be 761 * called under wq_pool_mutex or inside of a rcu_read_lock() region. 762 * 763 * All fields of the returned pool are accessible as long as the above 764 * mentioned locking is in effect. If the returned pool needs to be used 765 * beyond the critical section, the caller is responsible for ensuring the 766 * returned pool is and stays online. 767 * 768 * Return: The worker_pool @work was last associated with. %NULL if none. 769 */ 770 static struct worker_pool *get_work_pool(struct work_struct *work) 771 { 772 unsigned long data = atomic_long_read(&work->data); 773 int pool_id; 774 775 assert_rcu_or_pool_mutex(); 776 777 if (data & WORK_STRUCT_PWQ) 778 return work_struct_pwq(data)->pool; 779 780 pool_id = data >> WORK_OFFQ_POOL_SHIFT; 781 if (pool_id == WORK_OFFQ_POOL_NONE) 782 return NULL; 783 784 return idr_find(&worker_pool_idr, pool_id); 785 } 786 787 /** 788 * get_work_pool_id - return the worker pool ID a given work is associated with 789 * @work: the work item of interest 790 * 791 * Return: The worker_pool ID @work was last associated with. 792 * %WORK_OFFQ_POOL_NONE if none. 793 */ 794 static int get_work_pool_id(struct work_struct *work) 795 { 796 unsigned long data = atomic_long_read(&work->data); 797 798 if (data & WORK_STRUCT_PWQ) 799 return work_struct_pwq(data)->pool->id; 800 801 return data >> WORK_OFFQ_POOL_SHIFT; 802 } 803 804 static void mark_work_canceling(struct work_struct *work) 805 { 806 unsigned long pool_id = get_work_pool_id(work); 807 808 pool_id <<= WORK_OFFQ_POOL_SHIFT; 809 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 810 } 811 812 static bool work_is_canceling(struct work_struct *work) 813 { 814 unsigned long data = atomic_long_read(&work->data); 815 816 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 817 } 818 819 /* 820 * Policy functions. 
These define the policies on how the global worker 821 * pools are managed. Unless noted otherwise, these functions assume that 822 * they're being called with pool->lock held. 823 */ 824 825 /* 826 * Need to wake up a worker? Called from anything but currently 827 * running workers. 828 * 829 * Note that, because unbound workers never contribute to nr_running, this 830 * function will always return %true for unbound pools as long as the 831 * worklist isn't empty. 832 */ 833 static bool need_more_worker(struct worker_pool *pool) 834 { 835 return !list_empty(&pool->worklist) && !pool->nr_running; 836 } 837 838 /* Can I start working? Called from busy but !running workers. */ 839 static bool may_start_working(struct worker_pool *pool) 840 { 841 return pool->nr_idle; 842 } 843 844 /* Do I need to keep working? Called from currently running workers. */ 845 static bool keep_working(struct worker_pool *pool) 846 { 847 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); 848 } 849 850 /* Do we need a new worker? Called from manager. */ 851 static bool need_to_create_worker(struct worker_pool *pool) 852 { 853 return need_more_worker(pool) && !may_start_working(pool); 854 } 855 856 /* Do we have too many workers and should some go away? */ 857 static bool too_many_workers(struct worker_pool *pool) 858 { 859 bool managing = pool->flags & POOL_MANAGER_ACTIVE; 860 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 861 int nr_busy = pool->nr_workers - nr_idle; 862 863 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 864 } 865 866 /** 867 * worker_set_flags - set worker flags and adjust nr_running accordingly 868 * @worker: self 869 * @flags: flags to set 870 * 871 * Set @flags in @worker->flags and adjust nr_running accordingly. 872 */ 873 static inline void worker_set_flags(struct worker *worker, unsigned int flags) 874 { 875 struct worker_pool *pool = worker->pool; 876 877 lockdep_assert_held(&pool->lock); 878 879 /* If transitioning into NOT_RUNNING, adjust nr_running. */ 880 if ((flags & WORKER_NOT_RUNNING) && 881 !(worker->flags & WORKER_NOT_RUNNING)) { 882 pool->nr_running--; 883 } 884 885 worker->flags |= flags; 886 } 887 888 /** 889 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 890 * @worker: self 891 * @flags: flags to clear 892 * 893 * Clear @flags in @worker->flags and adjust nr_running accordingly. 894 */ 895 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 896 { 897 struct worker_pool *pool = worker->pool; 898 unsigned int oflags = worker->flags; 899 900 lockdep_assert_held(&pool->lock); 901 902 worker->flags &= ~flags; 903 904 /* 905 * If transitioning out of NOT_RUNNING, increment nr_running. Note 906 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 907 * of multiple flags, not a single flag. 908 */ 909 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 910 if (!(worker->flags & WORKER_NOT_RUNNING)) 911 pool->nr_running++; 912 } 913 914 /* Return the first idle worker. Called with pool->lock held. */ 915 static struct worker *first_idle_worker(struct worker_pool *pool) 916 { 917 if (unlikely(list_empty(&pool->idle_list))) 918 return NULL; 919 920 return list_first_entry(&pool->idle_list, struct worker, entry); 921 } 922 923 /** 924 * worker_enter_idle - enter idle state 925 * @worker: worker which is entering idle state 926 * 927 * @worker is entering idle state. Update stats and idle timer if 928 * necessary. 
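 *
 * Worked example for the idle-timer arming below (illustrative numbers
 * only): with MAX_IDLE_WORKERS_RATIO == 4, a pool with 20 busy workers
 * tolerates up to 6 idle ones; the 7th idle worker makes
 * too_many_workers() true since (7 - 2) * 4 >= 20, so the idle timer is
 * armed and the excess workers are culled after IDLE_WORKER_TIMEOUT.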
929 * 930 * LOCKING: 931 * raw_spin_lock_irq(pool->lock). 932 */ 933 static void worker_enter_idle(struct worker *worker) 934 { 935 struct worker_pool *pool = worker->pool; 936 937 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 938 WARN_ON_ONCE(!list_empty(&worker->entry) && 939 (worker->hentry.next || worker->hentry.pprev))) 940 return; 941 942 /* can't use worker_set_flags(), also called from create_worker() */ 943 worker->flags |= WORKER_IDLE; 944 pool->nr_idle++; 945 worker->last_active = jiffies; 946 947 /* idle_list is LIFO */ 948 list_add(&worker->entry, &pool->idle_list); 949 950 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 951 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 952 953 /* Sanity check nr_running. */ 954 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 955 } 956 957 /** 958 * worker_leave_idle - leave idle state 959 * @worker: worker which is leaving idle state 960 * 961 * @worker is leaving idle state. Update stats. 962 * 963 * LOCKING: 964 * raw_spin_lock_irq(pool->lock). 965 */ 966 static void worker_leave_idle(struct worker *worker) 967 { 968 struct worker_pool *pool = worker->pool; 969 970 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 971 return; 972 worker_clr_flags(worker, WORKER_IDLE); 973 pool->nr_idle--; 974 list_del_init(&worker->entry); 975 } 976 977 /** 978 * find_worker_executing_work - find worker which is executing a work 979 * @pool: pool of interest 980 * @work: work to find worker for 981 * 982 * Find a worker which is executing @work on @pool by searching 983 * @pool->busy_hash which is keyed by the address of @work. For a worker 984 * to match, its current execution should match the address of @work and 985 * its work function. This is to avoid unwanted dependency between 986 * unrelated work executions through a work item being recycled while still 987 * being executed. 988 * 989 * This is a bit tricky. A work item may be freed once its execution 990 * starts and nothing prevents the freed area from being recycled for 991 * another work item. If the same work item address ends up being reused 992 * before the original execution finishes, workqueue will identify the 993 * recycled work item as currently executing and make it wait until the 994 * current execution finishes, introducing an unwanted dependency. 995 * 996 * This function checks the work item address and work function to avoid 997 * false positives. Note that this isn't complete as one may construct a 998 * work function which can introduce dependency onto itself through a 999 * recycled work item. Well, if somebody wants to shoot oneself in the 1000 * foot that badly, there's only so much we can do, and if such deadlock 1001 * actually occurs, it should be easy to locate the culprit work function. 1002 * 1003 * CONTEXT: 1004 * raw_spin_lock_irq(pool->lock). 1005 * 1006 * Return: 1007 * Pointer to worker which is executing @work if found, %NULL 1008 * otherwise. 
1009 */ 1010 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1011 struct work_struct *work) 1012 { 1013 struct worker *worker; 1014 1015 hash_for_each_possible(pool->busy_hash, worker, hentry, 1016 (unsigned long)work) 1017 if (worker->current_work == work && 1018 worker->current_func == work->func) 1019 return worker; 1020 1021 return NULL; 1022 } 1023 1024 /** 1025 * move_linked_works - move linked works to a list 1026 * @work: start of series of works to be scheduled 1027 * @head: target list to append @work to 1028 * @nextp: out parameter for nested worklist walking 1029 * 1030 * Schedule linked works starting from @work to @head. Work series to be 1031 * scheduled starts at @work and includes any consecutive work with 1032 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1033 * @nextp. 1034 * 1035 * CONTEXT: 1036 * raw_spin_lock_irq(pool->lock). 1037 */ 1038 static void move_linked_works(struct work_struct *work, struct list_head *head, 1039 struct work_struct **nextp) 1040 { 1041 struct work_struct *n; 1042 1043 /* 1044 * Linked worklist will always end before the end of the list, 1045 * use NULL for list head. 1046 */ 1047 list_for_each_entry_safe_from(work, n, NULL, entry) { 1048 list_move_tail(&work->entry, head); 1049 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1050 break; 1051 } 1052 1053 /* 1054 * If we're already inside safe list traversal and have moved 1055 * multiple works to the scheduled queue, the next position 1056 * needs to be updated. 1057 */ 1058 if (nextp) 1059 *nextp = n; 1060 } 1061 1062 /** 1063 * assign_work - assign a work item and its linked work items to a worker 1064 * @work: work to assign 1065 * @worker: worker to assign to 1066 * @nextp: out parameter for nested worklist walking 1067 * 1068 * Assign @work and its linked work items to @worker. If @work is already being 1069 * executed by another worker in the same pool, it'll be punted there. 1070 * 1071 * If @nextp is not NULL, it's updated to point to the next work of the last 1072 * scheduled work. This allows assign_work() to be nested inside 1073 * list_for_each_entry_safe(). 1074 * 1075 * Returns %true if @work was successfully assigned to @worker. %false if @work 1076 * was punted to another worker already executing it. 1077 */ 1078 static bool assign_work(struct work_struct *work, struct worker *worker, 1079 struct work_struct **nextp) 1080 { 1081 struct worker_pool *pool = worker->pool; 1082 struct worker *collision; 1083 1084 lockdep_assert_held(&pool->lock); 1085 1086 /* 1087 * A single work shouldn't be executed concurrently by multiple workers. 1088 * __queue_work() ensures that @work doesn't jump to a different pool 1089 * while still running in the previous pool. Here, we should ensure that 1090 * @work is not executed concurrently by multiple workers from the same 1091 * pool. Check whether anyone is already processing the work. If so, 1092 * defer the work to the currently executing one. 1093 */ 1094 collision = find_worker_executing_work(pool, work); 1095 if (unlikely(collision)) { 1096 move_linked_works(work, &collision->scheduled, nextp); 1097 return false; 1098 } 1099 1100 move_linked_works(work, &worker->scheduled, nextp); 1101 return true; 1102 } 1103 1104 /** 1105 * kick_pool - wake up an idle worker if necessary 1106 * @pool: pool to kick 1107 * 1108 * @pool may have pending work items. Wake up worker if necessary. Returns 1109 * whether a worker was woken up. 
1110 */ 1111 static bool kick_pool(struct worker_pool *pool) 1112 { 1113 struct worker *worker = first_idle_worker(pool); 1114 struct task_struct *p; 1115 1116 lockdep_assert_held(&pool->lock); 1117 1118 if (!need_more_worker(pool) || !worker) 1119 return false; 1120 1121 p = worker->task; 1122 1123 #ifdef CONFIG_SMP 1124 /* 1125 * Idle @worker is about to execute @work and waking up provides an 1126 * opportunity to migrate @worker at a lower cost by setting the task's 1127 * wake_cpu field. Let's see if we want to move @worker to improve 1128 * execution locality. 1129 * 1130 * We're waking the worker that went idle the latest and there's some 1131 * chance that @worker is marked idle but hasn't gone off CPU yet. If 1132 * so, setting the wake_cpu won't do anything. As this is a best-effort 1133 * optimization and the race window is narrow, let's leave as-is for 1134 * now. If this becomes pronounced, we can skip over workers which are 1135 * still on cpu when picking an idle worker. 1136 * 1137 * If @pool has non-strict affinity, @worker might have ended up outside 1138 * its affinity scope. Repatriate. 1139 */ 1140 if (!pool->attrs->affn_strict && 1141 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 1142 struct work_struct *work = list_first_entry(&pool->worklist, 1143 struct work_struct, entry); 1144 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 1145 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 1146 } 1147 #endif 1148 wake_up_process(p); 1149 return true; 1150 } 1151 1152 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 1153 1154 /* 1155 * Concurrency-managed per-cpu work items that hog CPU for longer than 1156 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 1157 * which prevents them from stalling other concurrency-managed work items. If a 1158 * work function keeps triggering this mechanism, it's likely that the work item 1159 * should be using an unbound workqueue instead. 1160 * 1161 * wq_cpu_intensive_report() tracks work functions which trigger such conditions 1162 * and report them so that they can be examined and converted to use unbound 1163 * workqueues as appropriate. To avoid flooding the console, each violating work 1164 * function is tracked and reported with exponential backoff. 1165 */ 1166 #define WCI_MAX_ENTS 128 1167 1168 struct wci_ent { 1169 work_func_t func; 1170 atomic64_t cnt; 1171 struct hlist_node hash_node; 1172 }; 1173 1174 static struct wci_ent wci_ents[WCI_MAX_ENTS]; 1175 static int wci_nr_ents; 1176 static DEFINE_RAW_SPINLOCK(wci_lock); 1177 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 1178 1179 static struct wci_ent *wci_find_ent(work_func_t func) 1180 { 1181 struct wci_ent *ent; 1182 1183 hash_for_each_possible_rcu(wci_hash, ent, hash_node, 1184 (unsigned long)func) { 1185 if (ent->func == func) 1186 return ent; 1187 } 1188 return NULL; 1189 } 1190 1191 static void wq_cpu_intensive_report(work_func_t func) 1192 { 1193 struct wci_ent *ent; 1194 1195 restart: 1196 ent = wci_find_ent(func); 1197 if (ent) { 1198 u64 cnt; 1199 1200 /* 1201 * Start reporting from the fourth time and back off 1202 * exponentially. 1203 */ 1204 cnt = atomic64_inc_return_relaxed(&ent->cnt); 1205 if (cnt >= 4 && is_power_of_2(cnt)) 1206 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 1207 ent->func, wq_cpu_intensive_thresh_us, 1208 atomic64_read(&ent->cnt)); 1209 return; 1210 } 1211 1212 /* 1213 * @func is a new violation. 
Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 1);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);
}

#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule()
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
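 *
 * Illustrative tuning example (an assumption, not taken from this file):
 * booting with workqueue.cpu_intensive_thresh_us=10000 makes the check
 * below flag any concurrency-managed per-cpu work item that runs for more
 * than 10ms without sleeping as CPU_INTENSIVE, while setting the
 * parameter to 0 disables the detection altogether.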
1329 */ 1330 void wq_worker_tick(struct task_struct *task) 1331 { 1332 struct worker *worker = kthread_data(task); 1333 struct pool_workqueue *pwq = worker->current_pwq; 1334 struct worker_pool *pool = worker->pool; 1335 1336 if (!pwq) 1337 return; 1338 1339 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 1340 1341 if (!wq_cpu_intensive_thresh_us) 1342 return; 1343 1344 /* 1345 * If the current worker is concurrency managed and hogged the CPU for 1346 * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1347 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1348 * 1349 * Set @worker->sleeping means that @worker is in the process of 1350 * switching out voluntarily and won't be contributing to 1351 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also 1352 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1353 * double decrements. The task is releasing the CPU anyway. Let's skip. 1354 * We probably want to make this prettier in the future. 1355 */ 1356 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1357 worker->task->se.sum_exec_runtime - worker->current_at < 1358 wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1359 return; 1360 1361 raw_spin_lock(&pool->lock); 1362 1363 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 1364 wq_cpu_intensive_report(worker->current_func); 1365 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1366 1367 if (kick_pool(pool)) 1368 pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1369 1370 raw_spin_unlock(&pool->lock); 1371 } 1372 1373 /** 1374 * wq_worker_last_func - retrieve worker's last work function 1375 * @task: Task to retrieve last work function of. 1376 * 1377 * Determine the last function a worker executed. This is called from 1378 * the scheduler to get a worker's last known identity. 1379 * 1380 * CONTEXT: 1381 * raw_spin_lock_irq(rq->lock) 1382 * 1383 * This function is called during schedule() when a kworker is going 1384 * to sleep. It's used by psi to identify aggregation workers during 1385 * dequeuing, to allow periodic aggregation to shut-off when that 1386 * worker is the last task in the system or cgroup to go to sleep. 1387 * 1388 * As this function doesn't involve any workqueue-related locking, it 1389 * only returns stable values when called from inside the scheduler's 1390 * queuing and dequeuing paths, when @task, which must be a kworker, 1391 * is guaranteed to not be processing any works. 1392 * 1393 * Return: 1394 * The last work function %current executed as a worker, NULL if it 1395 * hasn't executed any work yet. 1396 */ 1397 work_func_t wq_worker_last_func(struct task_struct *task) 1398 { 1399 struct worker *worker = kthread_data(task); 1400 1401 return worker->last_func; 1402 } 1403 1404 /** 1405 * get_pwq - get an extra reference on the specified pool_workqueue 1406 * @pwq: pool_workqueue to get 1407 * 1408 * Obtain an extra reference on @pwq. The caller should guarantee that 1409 * @pwq has positive refcnt and be holding the matching pool->lock. 1410 */ 1411 static void get_pwq(struct pool_workqueue *pwq) 1412 { 1413 lockdep_assert_held(&pwq->pool->lock); 1414 WARN_ON_ONCE(pwq->refcnt <= 0); 1415 pwq->refcnt++; 1416 } 1417 1418 /** 1419 * put_pwq - put a pool_workqueue reference 1420 * @pwq: pool_workqueue to put 1421 * 1422 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1423 * destruction. The caller should be holding the matching pool->lock. 
1424 */ 1425 static void put_pwq(struct pool_workqueue *pwq) 1426 { 1427 lockdep_assert_held(&pwq->pool->lock); 1428 if (likely(--pwq->refcnt)) 1429 return; 1430 /* 1431 * @pwq can't be released under pool->lock, bounce to a dedicated 1432 * kthread_worker to avoid A-A deadlocks. 1433 */ 1434 kthread_queue_work(pwq_release_worker, &pwq->release_work); 1435 } 1436 1437 /** 1438 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1439 * @pwq: pool_workqueue to put (can be %NULL) 1440 * 1441 * put_pwq() with locking. This function also allows %NULL @pwq. 1442 */ 1443 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1444 { 1445 if (pwq) { 1446 /* 1447 * As both pwqs and pools are RCU protected, the 1448 * following lock operations are safe. 1449 */ 1450 raw_spin_lock_irq(&pwq->pool->lock); 1451 put_pwq(pwq); 1452 raw_spin_unlock_irq(&pwq->pool->lock); 1453 } 1454 } 1455 1456 static void pwq_activate_inactive_work(struct work_struct *work) 1457 { 1458 struct pool_workqueue *pwq = get_work_pwq(work); 1459 1460 trace_workqueue_activate_work(work); 1461 if (list_empty(&pwq->pool->worklist)) 1462 pwq->pool->watchdog_ts = jiffies; 1463 move_linked_works(work, &pwq->pool->worklist, NULL); 1464 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); 1465 pwq->nr_active++; 1466 } 1467 1468 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) 1469 { 1470 struct work_struct *work = list_first_entry(&pwq->inactive_works, 1471 struct work_struct, entry); 1472 1473 pwq_activate_inactive_work(work); 1474 } 1475 1476 /** 1477 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1478 * @pwq: pwq of interest 1479 * @work_data: work_data of work which left the queue 1480 * 1481 * A work either has completed or is removed from pending queue, 1482 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1483 * 1484 * CONTEXT: 1485 * raw_spin_lock_irq(pool->lock). 1486 */ 1487 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1488 { 1489 int color = get_work_color(work_data); 1490 1491 if (!(work_data & WORK_STRUCT_INACTIVE)) { 1492 pwq->nr_active--; 1493 if (!list_empty(&pwq->inactive_works)) { 1494 /* one down, submit an inactive one */ 1495 if (pwq->nr_active < pwq->max_active) 1496 pwq_activate_first_inactive(pwq); 1497 } 1498 } 1499 1500 pwq->nr_in_flight[color]--; 1501 1502 /* is flush in progress and are we at the flushing tip? */ 1503 if (likely(pwq->flush_color != color)) 1504 goto out_put; 1505 1506 /* are there still in-flight works? */ 1507 if (pwq->nr_in_flight[color]) 1508 goto out_put; 1509 1510 /* this pwq is done, clear flush_color */ 1511 pwq->flush_color = -1; 1512 1513 /* 1514 * If this was the last pwq, wake up the first flusher. It 1515 * will handle the rest. 1516 */ 1517 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1518 complete(&pwq->wq->first_flusher->done); 1519 out_put: 1520 put_pwq(pwq); 1521 } 1522 1523 /** 1524 * try_to_grab_pending - steal work item from worklist and disable irq 1525 * @work: work item to steal 1526 * @is_dwork: @work is a delayed_work 1527 * @flags: place to store irq state 1528 * 1529 * Try to grab PENDING bit of @work. This function can handle @work in any 1530 * stable state - idle, on timer or on worklist. 
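 *
 * Simplified sketch of how callers (e.g. the cancel paths) typically use
 * this function; error handling is elided and the flow is only
 * illustrative:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 * On -ENOENT the caller backs off and waits for the concurrent canceling
 * to finish; on 0 or 1 it owns PENDING with irqs disabled and eventually
 * releases them with local_irq_restore(flags).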
1531 * 1532 * Return: 1533 * 1534 * ======== ================================================================ 1535 * 1 if @work was pending and we successfully stole PENDING 1536 * 0 if @work was idle and we claimed PENDING 1537 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1538 * -ENOENT if someone else is canceling @work, this state may persist 1539 * for arbitrarily long 1540 * ======== ================================================================ 1541 * 1542 * Note: 1543 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1544 * interrupted while holding PENDING and @work off queue, irq must be 1545 * disabled on entry. This, combined with delayed_work->timer being 1546 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1547 * 1548 * On successful return, >= 0, irq is disabled and the caller is 1549 * responsible for releasing it using local_irq_restore(*@flags). 1550 * 1551 * This function is safe to call from any context including IRQ handler. 1552 */ 1553 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1554 unsigned long *flags) 1555 { 1556 struct worker_pool *pool; 1557 struct pool_workqueue *pwq; 1558 1559 local_irq_save(*flags); 1560 1561 /* try to steal the timer if it exists */ 1562 if (is_dwork) { 1563 struct delayed_work *dwork = to_delayed_work(work); 1564 1565 /* 1566 * dwork->timer is irqsafe. If del_timer() fails, it's 1567 * guaranteed that the timer is not queued anywhere and not 1568 * running on the local CPU. 1569 */ 1570 if (likely(del_timer(&dwork->timer))) 1571 return 1; 1572 } 1573 1574 /* try to claim PENDING the normal way */ 1575 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1576 return 0; 1577 1578 rcu_read_lock(); 1579 /* 1580 * The queueing is in progress, or it is already queued. Try to 1581 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1582 */ 1583 pool = get_work_pool(work); 1584 if (!pool) 1585 goto fail; 1586 1587 raw_spin_lock(&pool->lock); 1588 /* 1589 * work->data is guaranteed to point to pwq only while the work 1590 * item is queued on pwq->wq, and both updating work->data to point 1591 * to pwq on queueing and to pool on dequeueing are done under 1592 * pwq->pool->lock. This in turn guarantees that, if work->data 1593 * points to pwq which is associated with a locked pool, the work 1594 * item is currently queued on that pool. 1595 */ 1596 pwq = get_work_pwq(work); 1597 if (pwq && pwq->pool == pool) { 1598 debug_work_deactivate(work); 1599 1600 /* 1601 * A cancelable inactive work item must be in the 1602 * pwq->inactive_works since a queued barrier can't be 1603 * canceled (see the comments in insert_wq_barrier()). 1604 * 1605 * An inactive work item cannot be grabbed directly because 1606 * it might have linked barrier work items which, if left 1607 * on the inactive_works list, will confuse pwq->nr_active 1608 * management later on and cause stall. Make sure the work 1609 * item is activated before grabbing. 
1610 */ 1611 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) 1612 pwq_activate_inactive_work(work); 1613 1614 list_del_init(&work->entry); 1615 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 1616 1617 /* work->data points to pwq iff queued, point to pool */ 1618 set_work_pool_and_keep_pending(work, pool->id); 1619 1620 raw_spin_unlock(&pool->lock); 1621 rcu_read_unlock(); 1622 return 1; 1623 } 1624 raw_spin_unlock(&pool->lock); 1625 fail: 1626 rcu_read_unlock(); 1627 local_irq_restore(*flags); 1628 if (work_is_canceling(work)) 1629 return -ENOENT; 1630 cpu_relax(); 1631 return -EAGAIN; 1632 } 1633 1634 /** 1635 * insert_work - insert a work into a pool 1636 * @pwq: pwq @work belongs to 1637 * @work: work to insert 1638 * @head: insertion point 1639 * @extra_flags: extra WORK_STRUCT_* flags to set 1640 * 1641 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1642 * work_struct flags. 1643 * 1644 * CONTEXT: 1645 * raw_spin_lock_irq(pool->lock). 1646 */ 1647 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1648 struct list_head *head, unsigned int extra_flags) 1649 { 1650 debug_work_activate(work); 1651 1652 /* record the work call stack in order to print it in KASAN reports */ 1653 kasan_record_aux_stack_noalloc(work); 1654 1655 /* we own @work, set data and link */ 1656 set_work_pwq(work, pwq, extra_flags); 1657 list_add_tail(&work->entry, head); 1658 get_pwq(pwq); 1659 } 1660 1661 /* 1662 * Test whether @work is being queued from another work executing on the 1663 * same workqueue. 1664 */ 1665 static bool is_chained_work(struct workqueue_struct *wq) 1666 { 1667 struct worker *worker; 1668 1669 worker = current_wq_worker(); 1670 /* 1671 * Return %true iff I'm a worker executing a work item on @wq. If 1672 * I'm @worker, it's safe to dereference it without locking. 1673 */ 1674 return worker && worker->current_pwq->wq == wq; 1675 } 1676 1677 /* 1678 * When queueing an unbound work item to a wq, prefer local CPU if allowed 1679 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 1680 * avoid perturbing sensitive tasks. 1681 */ 1682 static int wq_select_unbound_cpu(int cpu) 1683 { 1684 int new_cpu; 1685 1686 if (likely(!wq_debug_force_rr_cpu)) { 1687 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1688 return cpu; 1689 } else { 1690 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1691 } 1692 1693 new_cpu = __this_cpu_read(wq_rr_cpu_last); 1694 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1695 if (unlikely(new_cpu >= nr_cpu_ids)) { 1696 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1697 if (unlikely(new_cpu >= nr_cpu_ids)) 1698 return cpu; 1699 } 1700 __this_cpu_write(wq_rr_cpu_last, new_cpu); 1701 1702 return new_cpu; 1703 } 1704 1705 static void __queue_work(int cpu, struct workqueue_struct *wq, 1706 struct work_struct *work) 1707 { 1708 struct pool_workqueue *pwq; 1709 struct worker_pool *last_pool, *pool; 1710 unsigned int work_flags; 1711 unsigned int req_cpu = cpu; 1712 1713 /* 1714 * While a work item is PENDING && off queue, a task trying to 1715 * steal the PENDING will busy-loop waiting for it to either get 1716 * queued or lose PENDING. Grabbing PENDING and queueing should 1717 * happen with IRQ disabled. 1718 */ 1719 lockdep_assert_irqs_disabled(); 1720 1721 1722 /* 1723 * For a draining wq, only works from the same workqueue are 1724 * allowed. 
The __WQ_DESTROYING helps to spot the issue that 1725 * queues a new work item to a wq after destroy_workqueue(wq). 1726 */ 1727 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 1728 WARN_ON_ONCE(!is_chained_work(wq)))) 1729 return; 1730 rcu_read_lock(); 1731 retry: 1732 /* pwq which will be used unless @work is executing elsewhere */ 1733 if (req_cpu == WORK_CPU_UNBOUND) { 1734 if (wq->flags & WQ_UNBOUND) 1735 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1736 else 1737 cpu = raw_smp_processor_id(); 1738 } 1739 1740 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1741 pool = pwq->pool; 1742 1743 /* 1744 * If @work was previously on a different pool, it might still be 1745 * running there, in which case the work needs to be queued on that 1746 * pool to guarantee non-reentrancy. 1747 */ 1748 last_pool = get_work_pool(work); 1749 if (last_pool && last_pool != pool) { 1750 struct worker *worker; 1751 1752 raw_spin_lock(&last_pool->lock); 1753 1754 worker = find_worker_executing_work(last_pool, work); 1755 1756 if (worker && worker->current_pwq->wq == wq) { 1757 pwq = worker->current_pwq; 1758 pool = pwq->pool; 1759 WARN_ON_ONCE(pool != last_pool); 1760 } else { 1761 /* meh... not running there, queue here */ 1762 raw_spin_unlock(&last_pool->lock); 1763 raw_spin_lock(&pool->lock); 1764 } 1765 } else { 1766 raw_spin_lock(&pool->lock); 1767 } 1768 1769 /* 1770 * pwq is determined and locked. For unbound pools, we could have raced 1771 * with pwq release and it could already be dead. If its refcnt is zero, 1772 * repeat pwq selection. Note that unbound pwqs never die without 1773 * another pwq replacing it in cpu_pwq or while work items are executing 1774 * on it, so the retrying is guaranteed to make forward-progress. 1775 */ 1776 if (unlikely(!pwq->refcnt)) { 1777 if (wq->flags & WQ_UNBOUND) { 1778 raw_spin_unlock(&pool->lock); 1779 cpu_relax(); 1780 goto retry; 1781 } 1782 /* oops */ 1783 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 1784 wq->name, cpu); 1785 } 1786 1787 /* pwq determined, queue */ 1788 trace_workqueue_queue_work(req_cpu, pwq, work); 1789 1790 if (WARN_ON(!list_empty(&work->entry))) 1791 goto out; 1792 1793 pwq->nr_in_flight[pwq->work_color]++; 1794 work_flags = work_color_to_flags(pwq->work_color); 1795 1796 if (likely(pwq->nr_active < pwq->max_active)) { 1797 if (list_empty(&pool->worklist)) 1798 pool->watchdog_ts = jiffies; 1799 1800 trace_workqueue_activate_work(work); 1801 pwq->nr_active++; 1802 insert_work(pwq, work, &pool->worklist, work_flags); 1803 kick_pool(pool); 1804 } else { 1805 work_flags |= WORK_STRUCT_INACTIVE; 1806 insert_work(pwq, work, &pwq->inactive_works, work_flags); 1807 } 1808 1809 out: 1810 raw_spin_unlock(&pool->lock); 1811 rcu_read_unlock(); 1812 } 1813 1814 /** 1815 * queue_work_on - queue work on specific cpu 1816 * @cpu: CPU number to execute work on 1817 * @wq: workqueue to use 1818 * @work: work to queue 1819 * 1820 * We queue the work to a specific CPU, the caller must ensure it 1821 * can't go away. Callers that fail to ensure that the specified 1822 * CPU cannot go away will execute on a randomly chosen CPU. 1823 * But note well that callers specifying a CPU that never has been 1824 * online will get a splat. 1825 * 1826 * Return: %false if @work was already on a queue, %true otherwise. 
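 *
 * A minimal usage sketch (my_work_fn and my_work are hypothetical names used
 * only for illustration, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work_on(1, system_wq, &my_work))
 *		pr_debug("my_work was already pending\n");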
1827 */ 1828 bool queue_work_on(int cpu, struct workqueue_struct *wq, 1829 struct work_struct *work) 1830 { 1831 bool ret = false; 1832 unsigned long flags; 1833 1834 local_irq_save(flags); 1835 1836 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1837 __queue_work(cpu, wq, work); 1838 ret = true; 1839 } 1840 1841 local_irq_restore(flags); 1842 return ret; 1843 } 1844 EXPORT_SYMBOL(queue_work_on); 1845 1846 /** 1847 * select_numa_node_cpu - Select a CPU based on NUMA node 1848 * @node: NUMA node ID that we want to select a CPU from 1849 * 1850 * This function will attempt to find a "random" cpu available on a given 1851 * node. If there are no CPUs available on the given node it will return 1852 * WORK_CPU_UNBOUND indicating that we should just schedule to any 1853 * available CPU if we need to schedule this work. 1854 */ 1855 static int select_numa_node_cpu(int node) 1856 { 1857 int cpu; 1858 1859 /* Delay binding to CPU if node is not valid or online */ 1860 if (node < 0 || node >= MAX_NUMNODES || !node_online(node)) 1861 return WORK_CPU_UNBOUND; 1862 1863 /* Use local node/cpu if we are already there */ 1864 cpu = raw_smp_processor_id(); 1865 if (node == cpu_to_node(cpu)) 1866 return cpu; 1867 1868 /* Use "random", otherwise known as "first", online CPU of node */ 1869 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); 1870 1871 /* If CPU is valid return that, otherwise just defer */ 1872 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND; 1873 } 1874 1875 /** 1876 * queue_work_node - queue work on a "random" cpu for a given NUMA node 1877 * @node: NUMA node that we are targeting the work for 1878 * @wq: workqueue to use 1879 * @work: work to queue 1880 * 1881 * We queue the work to a "random" CPU within a given NUMA node. The basic 1882 * idea here is to provide a way to somehow associate work with a given 1883 * NUMA node. 1884 * 1885 * This function will only make a best effort attempt at getting this onto 1886 * the right NUMA node. If no node is requested or the requested node is 1887 * offline then we just fall back to standard queue_work behavior. 1888 * 1889 * Currently the "random" CPU ends up being the first available CPU in the 1890 * intersection of cpu_online_mask and the cpumask of the node, unless we 1891 * are running on the node. In that case we just use the current CPU. 1892 * 1893 * Return: %false if @work was already on a queue, %true otherwise. 1894 */ 1895 bool queue_work_node(int node, struct workqueue_struct *wq, 1896 struct work_struct *work) 1897 { 1898 unsigned long flags; 1899 bool ret = false; 1900 1901 /* 1902 * This current implementation is specific to unbound workqueues. 1903 * Specifically, we only return the first available CPU for a given 1904 * node instead of cycling through individual CPUs within the node. 1905 * 1906 * If this is used with a per-cpu workqueue then the logic in 1907 * select_numa_node_cpu() would need to be updated to allow for 1908 * some round-robin type logic.
1909 */ 1910 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 1911 1912 local_irq_save(flags); 1913 1914 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1915 int cpu = select_numa_node_cpu(node); 1916 1917 __queue_work(cpu, wq, work); 1918 ret = true; 1919 } 1920 1921 local_irq_restore(flags); 1922 return ret; 1923 } 1924 EXPORT_SYMBOL_GPL(queue_work_node); 1925 1926 void delayed_work_timer_fn(struct timer_list *t) 1927 { 1928 struct delayed_work *dwork = from_timer(dwork, t, timer); 1929 1930 /* should have been called from irqsafe timer with irq already off */ 1931 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 1932 } 1933 EXPORT_SYMBOL(delayed_work_timer_fn); 1934 1935 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1936 struct delayed_work *dwork, unsigned long delay) 1937 { 1938 struct timer_list *timer = &dwork->timer; 1939 struct work_struct *work = &dwork->work; 1940 1941 WARN_ON_ONCE(!wq); 1942 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 1943 WARN_ON_ONCE(timer_pending(timer)); 1944 WARN_ON_ONCE(!list_empty(&work->entry)); 1945 1946 /* 1947 * If @delay is 0, queue @dwork->work immediately. This is for 1948 * both optimization and correctness. The earliest @timer can 1949 * expire is on the closest next tick and delayed_work users depend 1950 * on that there's no such delay when @delay is 0. 1951 */ 1952 if (!delay) { 1953 __queue_work(cpu, wq, &dwork->work); 1954 return; 1955 } 1956 1957 dwork->wq = wq; 1958 dwork->cpu = cpu; 1959 timer->expires = jiffies + delay; 1960 1961 if (unlikely(cpu != WORK_CPU_UNBOUND)) 1962 add_timer_on(timer, cpu); 1963 else 1964 add_timer(timer); 1965 } 1966 1967 /** 1968 * queue_delayed_work_on - queue work on specific CPU after delay 1969 * @cpu: CPU number to execute work on 1970 * @wq: workqueue to use 1971 * @dwork: work to queue 1972 * @delay: number of jiffies to wait before queueing 1973 * 1974 * Return: %false if @work was already on a queue, %true otherwise. If 1975 * @delay is zero and @dwork is idle, it will be scheduled for immediate 1976 * execution. 1977 */ 1978 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1979 struct delayed_work *dwork, unsigned long delay) 1980 { 1981 struct work_struct *work = &dwork->work; 1982 bool ret = false; 1983 unsigned long flags; 1984 1985 /* read the comment in __queue_work() */ 1986 local_irq_save(flags); 1987 1988 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1989 __queue_delayed_work(cpu, wq, dwork, delay); 1990 ret = true; 1991 } 1992 1993 local_irq_restore(flags); 1994 return ret; 1995 } 1996 EXPORT_SYMBOL(queue_delayed_work_on); 1997 1998 /** 1999 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 2000 * @cpu: CPU number to execute work on 2001 * @wq: workqueue to use 2002 * @dwork: work to queue 2003 * @delay: number of jiffies to wait before queueing 2004 * 2005 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2006 * modify @dwork's timer so that it expires after @delay. If @delay is 2007 * zero, @work is guaranteed to be scheduled immediately regardless of its 2008 * current state. 2009 * 2010 * Return: %false if @dwork was idle and queued, %true if @dwork was 2011 * pending and its timer was modified. 2012 * 2013 * This function is safe to call from any context including IRQ handler. 2014 * See try_to_grab_pending() for details. 
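 *
 * A small sketch (debounce_fn and debounce_work are hypothetical names,
 * illustration only): each call pushes execution back by another 100ms,
 * queueing the work if it wasn't pending yet:
 *
 *	static void debounce_fn(struct work_struct *work)
 *	{
 *		pr_info("input settled\n");
 *	}
 *	static DECLARE_DELAYED_WORK(debounce_work, debounce_fn);
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &debounce_work,
 *			    msecs_to_jiffies(100));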
2015 */ 2016 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2017 struct delayed_work *dwork, unsigned long delay) 2018 { 2019 unsigned long flags; 2020 int ret; 2021 2022 do { 2023 ret = try_to_grab_pending(&dwork->work, true, &flags); 2024 } while (unlikely(ret == -EAGAIN)); 2025 2026 if (likely(ret >= 0)) { 2027 __queue_delayed_work(cpu, wq, dwork, delay); 2028 local_irq_restore(flags); 2029 } 2030 2031 /* -ENOENT from try_to_grab_pending() becomes %true */ 2032 return ret; 2033 } 2034 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2035 2036 static void rcu_work_rcufn(struct rcu_head *rcu) 2037 { 2038 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2039 2040 /* read the comment in __queue_work() */ 2041 local_irq_disable(); 2042 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2043 local_irq_enable(); 2044 } 2045 2046 /** 2047 * queue_rcu_work - queue work after a RCU grace period 2048 * @wq: workqueue to use 2049 * @rwork: work to queue 2050 * 2051 * Return: %false if @rwork was already pending, %true otherwise. Note 2052 * that a full RCU grace period is guaranteed only after a %true return. 2053 * While @rwork is guaranteed to be executed after a %false return, the 2054 * execution may happen before a full RCU grace period has passed. 2055 */ 2056 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2057 { 2058 struct work_struct *work = &rwork->work; 2059 2060 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2061 rwork->wq = wq; 2062 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2063 return true; 2064 } 2065 2066 return false; 2067 } 2068 EXPORT_SYMBOL(queue_rcu_work); 2069 2070 static struct worker *alloc_worker(int node) 2071 { 2072 struct worker *worker; 2073 2074 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2075 if (worker) { 2076 INIT_LIST_HEAD(&worker->entry); 2077 INIT_LIST_HEAD(&worker->scheduled); 2078 INIT_LIST_HEAD(&worker->node); 2079 /* on creation a worker is in !idle && prep state */ 2080 worker->flags = WORKER_PREP; 2081 } 2082 return worker; 2083 } 2084 2085 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2086 { 2087 if (pool->cpu < 0 && pool->attrs->affn_strict) 2088 return pool->attrs->__pod_cpumask; 2089 else 2090 return pool->attrs->cpumask; 2091 } 2092 2093 /** 2094 * worker_attach_to_pool() - attach a worker to a pool 2095 * @worker: worker to be attached 2096 * @pool: the target pool 2097 * 2098 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2099 * cpu-binding of @worker are kept coordinated with the pool across 2100 * cpu-[un]hotplugs. 2101 */ 2102 static void worker_attach_to_pool(struct worker *worker, 2103 struct worker_pool *pool) 2104 { 2105 mutex_lock(&wq_pool_attach_mutex); 2106 2107 /* 2108 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 2109 * stable across this function. See the comments above the flag 2110 * definition for details. 
2111 */ 2112 if (pool->flags & POOL_DISASSOCIATED) 2113 worker->flags |= WORKER_UNBOUND; 2114 else 2115 kthread_set_per_cpu(worker->task, pool->cpu); 2116 2117 if (worker->rescue_wq) 2118 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); 2119 2120 list_add_tail(&worker->node, &pool->workers); 2121 worker->pool = pool; 2122 2123 mutex_unlock(&wq_pool_attach_mutex); 2124 } 2125 2126 /** 2127 * worker_detach_from_pool() - detach a worker from its pool 2128 * @worker: worker which is attached to its pool 2129 * 2130 * Undo the attaching done in worker_attach_to_pool(). The calling 2131 * worker shouldn't access the pool after detaching unless it holds 2132 * another reference to the pool. 2133 */ 2134 static void worker_detach_from_pool(struct worker *worker) 2135 { 2136 struct worker_pool *pool = worker->pool; 2137 struct completion *detach_completion = NULL; 2138 2139 mutex_lock(&wq_pool_attach_mutex); 2140 2141 kthread_set_per_cpu(worker->task, -1); 2142 list_del(&worker->node); 2143 worker->pool = NULL; 2144 2145 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) 2146 detach_completion = pool->detach_completion; 2147 mutex_unlock(&wq_pool_attach_mutex); 2148 2149 /* clear leftover flags without pool->lock after it is detached */ 2150 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2151 2152 if (detach_completion) 2153 complete(detach_completion); 2154 } 2155 2156 /** 2157 * create_worker - create a new workqueue worker 2158 * @pool: pool the new worker will belong to 2159 * 2160 * Create and start a new worker which is attached to @pool. 2161 * 2162 * CONTEXT: 2163 * Might sleep. Does GFP_KERNEL allocations. 2164 * 2165 * Return: 2166 * Pointer to the newly created worker. 2167 */ 2168 static struct worker *create_worker(struct worker_pool *pool) 2169 { 2170 struct worker *worker; 2171 int id; 2172 char id_buf[23]; 2173 2174 /* ID is needed to determine kthread name */ 2175 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); 2176 if (id < 0) { 2177 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n", 2178 ERR_PTR(id)); 2179 return NULL; 2180 } 2181 2182 worker = alloc_worker(pool->node); 2183 if (!worker) { 2184 pr_err_once("workqueue: Failed to allocate a worker\n"); 2185 goto fail; 2186 } 2187 2188 worker->id = id; 2189 2190 if (pool->cpu >= 0) 2191 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2192 pool->attrs->nice < 0 ? "H" : ""); 2193 else 2194 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2195 2196 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2197 "kworker/%s", id_buf); 2198 if (IS_ERR(worker->task)) { 2199 if (PTR_ERR(worker->task) == -EINTR) { 2200 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", 2201 id_buf); 2202 } else { 2203 pr_err_once("workqueue: Failed to create a worker thread: %pe", 2204 worker->task); 2205 } 2206 goto fail; 2207 } 2208 2209 set_user_nice(worker->task, pool->attrs->nice); 2210 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); 2211 2212 /* successful, attach the worker to the pool */ 2213 worker_attach_to_pool(worker, pool); 2214 2215 /* start the newly created worker */ 2216 raw_spin_lock_irq(&pool->lock); 2217 2218 worker->pool->nr_workers++; 2219 worker_enter_idle(worker); 2220 kick_pool(pool); 2221 2222 /* 2223 * @worker is waiting on a completion in kthread() and will trigger hung 2224 * check if not woken up soon. As kick_pool() might not have woken it 2225 * up, wake it up explicitly once more.
2226 */ 2227 wake_up_process(worker->task); 2228 2229 raw_spin_unlock_irq(&pool->lock); 2230 2231 return worker; 2232 2233 fail: 2234 ida_free(&pool->worker_ida, id); 2235 kfree(worker); 2236 return NULL; 2237 } 2238 2239 static void unbind_worker(struct worker *worker) 2240 { 2241 lockdep_assert_held(&wq_pool_attach_mutex); 2242 2243 kthread_set_per_cpu(worker->task, -1); 2244 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2245 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2246 else 2247 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2248 } 2249 2250 static void wake_dying_workers(struct list_head *cull_list) 2251 { 2252 struct worker *worker, *tmp; 2253 2254 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2255 list_del_init(&worker->entry); 2256 unbind_worker(worker); 2257 /* 2258 * If the worker was somehow already running, then it had to be 2259 * in pool->idle_list when set_worker_dying() happened or we 2260 * wouldn't have gotten here. 2261 * 2262 * Thus, the worker must either have observed the WORKER_DIE 2263 * flag, or have set its state to TASK_IDLE. Either way, the 2264 * below will be observed by the worker and is safe to do 2265 * outside of pool->lock. 2266 */ 2267 wake_up_process(worker->task); 2268 } 2269 } 2270 2271 /** 2272 * set_worker_dying - Tag a worker for destruction 2273 * @worker: worker to be destroyed 2274 * @list: transfer worker away from its pool->idle_list and into list 2275 * 2276 * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2277 * should be idle. 2278 * 2279 * CONTEXT: 2280 * raw_spin_lock_irq(pool->lock). 2281 */ 2282 static void set_worker_dying(struct worker *worker, struct list_head *list) 2283 { 2284 struct worker_pool *pool = worker->pool; 2285 2286 lockdep_assert_held(&pool->lock); 2287 lockdep_assert_held(&wq_pool_attach_mutex); 2288 2289 /* sanity check frenzy */ 2290 if (WARN_ON(worker->current_work) || 2291 WARN_ON(!list_empty(&worker->scheduled)) || 2292 WARN_ON(!(worker->flags & WORKER_IDLE))) 2293 return; 2294 2295 pool->nr_workers--; 2296 pool->nr_idle--; 2297 2298 worker->flags |= WORKER_DIE; 2299 2300 list_move(&worker->entry, list); 2301 list_move(&worker->node, &pool->dying_workers); 2302 } 2303 2304 /** 2305 * idle_worker_timeout - check if some idle workers can now be deleted. 2306 * @t: The pool's idle_timer that just expired 2307 * 2308 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2309 * worker_leave_idle(), as a worker flicking between idle and active while its 2310 * pool is at the too_many_workers() tipping point would cause too much timer 2311 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2312 * it expire and re-evaluate things from there. 
2313 */ 2314 static void idle_worker_timeout(struct timer_list *t) 2315 { 2316 struct worker_pool *pool = from_timer(pool, t, idle_timer); 2317 bool do_cull = false; 2318 2319 if (work_pending(&pool->idle_cull_work)) 2320 return; 2321 2322 raw_spin_lock_irq(&pool->lock); 2323 2324 if (too_many_workers(pool)) { 2325 struct worker *worker; 2326 unsigned long expires; 2327 2328 /* idle_list is kept in LIFO order, check the last one */ 2329 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2330 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2331 do_cull = !time_before(jiffies, expires); 2332 2333 if (!do_cull) 2334 mod_timer(&pool->idle_timer, expires); 2335 } 2336 raw_spin_unlock_irq(&pool->lock); 2337 2338 if (do_cull) 2339 queue_work(system_unbound_wq, &pool->idle_cull_work); 2340 } 2341 2342 /** 2343 * idle_cull_fn - cull workers that have been idle for too long. 2344 * @work: the pool's work for handling these idle workers 2345 * 2346 * This goes through a pool's idle workers and gets rid of those that have been 2347 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes). 2348 * 2349 * We don't want to disturb isolated CPUs because of a pcpu kworker being 2350 * culled, so this also resets worker affinity. This requires a sleepable 2351 * context, hence the split between timer callback and work item. 2352 */ 2353 static void idle_cull_fn(struct work_struct *work) 2354 { 2355 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); 2356 LIST_HEAD(cull_list); 2357 2358 /* 2359 * Grabbing wq_pool_attach_mutex here ensures an already-running worker 2360 * cannot proceed beyond worker_detach_from_pool() in its self-destruct 2361 * path. This is required as a previously-preempted worker could run after 2362 * set_worker_dying() has happened but before wake_dying_workers() did. 2363 */ 2364 mutex_lock(&wq_pool_attach_mutex); 2365 raw_spin_lock_irq(&pool->lock); 2366 2367 while (too_many_workers(pool)) { 2368 struct worker *worker; 2369 unsigned long expires; 2370 2371 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2372 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2373 2374 if (time_before(jiffies, expires)) { 2375 mod_timer(&pool->idle_timer, expires); 2376 break; 2377 } 2378 2379 set_worker_dying(worker, &cull_list); 2380 } 2381 2382 raw_spin_unlock_irq(&pool->lock); 2383 wake_dying_workers(&cull_list); 2384 mutex_unlock(&wq_pool_attach_mutex); 2385 } 2386 2387 static void send_mayday(struct work_struct *work) 2388 { 2389 struct pool_workqueue *pwq = get_work_pwq(work); 2390 struct workqueue_struct *wq = pwq->wq; 2391 2392 lockdep_assert_held(&wq_mayday_lock); 2393 2394 if (!wq->rescuer) 2395 return; 2396 2397 /* mayday mayday mayday */ 2398 if (list_empty(&pwq->mayday_node)) { 2399 /* 2400 * If @pwq is for an unbound wq, its base ref may be put at 2401 * any time due to an attribute change. Pin @pwq until the 2402 * rescuer is done with it. 2403 */ 2404 get_pwq(pwq); 2405 list_add_tail(&pwq->mayday_node, &wq->maydays); 2406 wake_up_process(wq->rescuer->task); 2407 pwq->stats[PWQ_STAT_MAYDAY]++; 2408 } 2409 } 2410 2411 static void pool_mayday_timeout(struct timer_list *t) 2412 { 2413 struct worker_pool *pool = from_timer(pool, t, mayday_timer); 2414 struct work_struct *work; 2415 2416 raw_spin_lock_irq(&pool->lock); 2417 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2418 2419 if (need_to_create_worker(pool)) { 2420 /* 2421 * We've been trying to create a new worker but 2422 * haven't been successful.
We might be hitting an 2423 * allocation deadlock. Send distress signals to 2424 * rescuers. 2425 */ 2426 list_for_each_entry(work, &pool->worklist, entry) 2427 send_mayday(work); 2428 } 2429 2430 raw_spin_unlock(&wq_mayday_lock); 2431 raw_spin_unlock_irq(&pool->lock); 2432 2433 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2434 } 2435 2436 /** 2437 * maybe_create_worker - create a new worker if necessary 2438 * @pool: pool to create a new worker for 2439 * 2440 * Create a new worker for @pool if necessary. @pool is guaranteed to 2441 * have at least one idle worker on return from this function. If 2442 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 2443 * sent to all rescuers with works scheduled on @pool to resolve 2444 * possible allocation deadlock. 2445 * 2446 * On return, need_to_create_worker() is guaranteed to be %false and 2447 * may_start_working() %true. 2448 * 2449 * LOCKING: 2450 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2451 * multiple times. Does GFP_KERNEL allocations. Called only from 2452 * manager. 2453 */ 2454 static void maybe_create_worker(struct worker_pool *pool) 2455 __releases(&pool->lock) 2456 __acquires(&pool->lock) 2457 { 2458 restart: 2459 raw_spin_unlock_irq(&pool->lock); 2460 2461 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 2462 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2463 2464 while (true) { 2465 if (create_worker(pool) || !need_to_create_worker(pool)) 2466 break; 2467 2468 schedule_timeout_interruptible(CREATE_COOLDOWN); 2469 2470 if (!need_to_create_worker(pool)) 2471 break; 2472 } 2473 2474 del_timer_sync(&pool->mayday_timer); 2475 raw_spin_lock_irq(&pool->lock); 2476 /* 2477 * This is necessary even after a new worker was just successfully 2478 * created as @pool->lock was dropped and the new worker might have 2479 * already become busy. 2480 */ 2481 if (need_to_create_worker(pool)) 2482 goto restart; 2483 } 2484 2485 /** 2486 * manage_workers - manage worker pool 2487 * @worker: self 2488 * 2489 * Assume the manager role and manage the worker pool @worker belongs 2490 * to. At any given time, there can be only zero or one manager per 2491 * pool. The exclusion is handled automatically by this function. 2492 * 2493 * The caller can safely start processing works on false return. On 2494 * true return, it's guaranteed that need_to_create_worker() is false 2495 * and may_start_working() is true. 2496 * 2497 * CONTEXT: 2498 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2499 * multiple times. Does GFP_KERNEL allocations. 2500 * 2501 * Return: 2502 * %false if the pool doesn't need management and the caller can safely 2503 * start processing works, %true if management function was performed and 2504 * the conditions that the caller verified before calling the function may 2505 * no longer be true. 2506 */ 2507 static bool manage_workers(struct worker *worker) 2508 { 2509 struct worker_pool *pool = worker->pool; 2510 2511 if (pool->flags & POOL_MANAGER_ACTIVE) 2512 return false; 2513 2514 pool->flags |= POOL_MANAGER_ACTIVE; 2515 pool->manager = worker; 2516 2517 maybe_create_worker(pool); 2518 2519 pool->manager = NULL; 2520 pool->flags &= ~POOL_MANAGER_ACTIVE; 2521 rcuwait_wake_up(&manager_wait); 2522 return true; 2523 } 2524 2525 /** 2526 * process_one_work - process single work 2527 * @worker: self 2528 * @work: work to process 2529 * 2530 * Process @work. 
This function contains all the logics necessary to 2531 * process a single work including synchronization against and 2532 * interaction with other workers on the same cpu, queueing and 2533 * flushing. As long as context requirement is met, any worker can 2534 * call this function to process a work. 2535 * 2536 * CONTEXT: 2537 * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 2538 */ 2539 static void process_one_work(struct worker *worker, struct work_struct *work) 2540 __releases(&pool->lock) 2541 __acquires(&pool->lock) 2542 { 2543 struct pool_workqueue *pwq = get_work_pwq(work); 2544 struct worker_pool *pool = worker->pool; 2545 unsigned long work_data; 2546 #ifdef CONFIG_LOCKDEP 2547 /* 2548 * It is permissible to free the struct work_struct from 2549 * inside the function that is called from it, this we need to 2550 * take into account for lockdep too. To avoid bogus "held 2551 * lock freed" warnings as well as problems when looking into 2552 * work->lockdep_map, make a copy and use that here. 2553 */ 2554 struct lockdep_map lockdep_map; 2555 2556 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 2557 #endif 2558 /* ensure we're on the correct CPU */ 2559 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 2560 raw_smp_processor_id() != pool->cpu); 2561 2562 /* claim and dequeue */ 2563 debug_work_deactivate(work); 2564 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2565 worker->current_work = work; 2566 worker->current_func = work->func; 2567 worker->current_pwq = pwq; 2568 worker->current_at = worker->task->se.sum_exec_runtime; 2569 work_data = *work_data_bits(work); 2570 worker->current_color = get_work_color(work_data); 2571 2572 /* 2573 * Record wq name for cmdline and debug reporting, may get 2574 * overridden through set_worker_desc(). 2575 */ 2576 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 2577 2578 list_del_init(&work->entry); 2579 2580 /* 2581 * CPU intensive works don't participate in concurrency management. 2582 * They're the scheduler's responsibility. This takes @worker out 2583 * of concurrency management and the next code block will chain 2584 * execution of the pending work items. 2585 */ 2586 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 2587 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2588 2589 /* 2590 * Kick @pool if necessary. It's always noop for per-cpu worker pools 2591 * since nr_running would always be >= 1 at this point. This is used to 2592 * chain execution of the pending work items for WORKER_NOT_RUNNING 2593 * workers such as the UNBOUND and CPU_INTENSIVE ones. 2594 */ 2595 kick_pool(pool); 2596 2597 /* 2598 * Record the last pool and clear PENDING which should be the last 2599 * update to @work. Also, do this inside @pool->lock so that 2600 * PENDING and queued state changes happen together while IRQ is 2601 * disabled. 2602 */ 2603 set_work_pool_and_clear_pending(work, pool->id); 2604 2605 pwq->stats[PWQ_STAT_STARTED]++; 2606 raw_spin_unlock_irq(&pool->lock); 2607 2608 lock_map_acquire(&pwq->wq->lockdep_map); 2609 lock_map_acquire(&lockdep_map); 2610 /* 2611 * Strictly speaking we should mark the invariant state without holding 2612 * any locks, that is, before these two lock_map_acquire()'s. 2613 * 2614 * However, that would result in: 2615 * 2616 * A(W1) 2617 * WFC(C) 2618 * A(W1) 2619 * C(C) 2620 * 2621 * Which would create W1->C->W1 dependencies, even though there is no 2622 * actual deadlock possible. 
There are two solutions, using a 2623 * read-recursive acquire on the work(queue) 'locks', but this will then 2624 * hit the lockdep limitation on recursive locks, or simply discard 2625 * these locks. 2626 * 2627 * AFAICT there is no possible deadlock scenario between the 2628 * flush_work() and complete() primitives (except for single-threaded 2629 * workqueues), so hiding them isn't a problem. 2630 */ 2631 lockdep_invariant_state(true); 2632 trace_workqueue_execute_start(work); 2633 worker->current_func(work); 2634 /* 2635 * While we must be careful to not use "work" after this, the trace 2636 * point will only record its address. 2637 */ 2638 trace_workqueue_execute_end(work, worker->current_func); 2639 pwq->stats[PWQ_STAT_COMPLETED]++; 2640 lock_map_release(&lockdep_map); 2641 lock_map_release(&pwq->wq->lockdep_map); 2642 2643 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2644 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2645 " last function: %ps\n", 2646 current->comm, preempt_count(), task_pid_nr(current), 2647 worker->current_func); 2648 debug_show_held_locks(current); 2649 dump_stack(); 2650 } 2651 2652 /* 2653 * The following prevents a kworker from hogging CPU on !PREEMPTION 2654 * kernels, where a requeueing work item waiting for something to 2655 * happen could deadlock with stop_machine as such work item could 2656 * indefinitely requeue itself while all other CPUs are trapped in 2657 * stop_machine. At the same time, report a quiescent RCU state so 2658 * the same condition doesn't freeze RCU. 2659 */ 2660 cond_resched(); 2661 2662 raw_spin_lock_irq(&pool->lock); 2663 2664 /* 2665 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2666 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2667 * wq_cpu_intensive_thresh_us. Clear it. 2668 */ 2669 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2670 2671 /* tag the worker for identification in schedule() */ 2672 worker->last_func = worker->current_func; 2673 2674 /* we're done with it, release */ 2675 hash_del(&worker->hentry); 2676 worker->current_work = NULL; 2677 worker->current_func = NULL; 2678 worker->current_pwq = NULL; 2679 worker->current_color = INT_MAX; 2680 pwq_dec_nr_in_flight(pwq, work_data); 2681 } 2682 2683 /** 2684 * process_scheduled_works - process scheduled works 2685 * @worker: self 2686 * 2687 * Process all scheduled works. Please note that the scheduled list 2688 * may change while processing a work, so this function repeatedly 2689 * fetches a work from the top and executes it. 2690 * 2691 * CONTEXT: 2692 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2693 * multiple times. 2694 */ 2695 static void process_scheduled_works(struct worker *worker) 2696 { 2697 struct work_struct *work; 2698 bool first = true; 2699 2700 while ((work = list_first_entry_or_null(&worker->scheduled, 2701 struct work_struct, entry))) { 2702 if (first) { 2703 worker->pool->watchdog_ts = jiffies; 2704 first = false; 2705 } 2706 process_one_work(worker, work); 2707 } 2708 } 2709 2710 static void set_pf_worker(bool val) 2711 { 2712 mutex_lock(&wq_pool_attach_mutex); 2713 if (val) 2714 current->flags |= PF_WQ_WORKER; 2715 else 2716 current->flags &= ~PF_WQ_WORKER; 2717 mutex_unlock(&wq_pool_attach_mutex); 2718 } 2719 2720 /** 2721 * worker_thread - the worker thread function 2722 * @__worker: self 2723 * 2724 * The worker thread function. All workers belong to a worker_pool - 2725 * either a per-cpu one or dynamic unbound one. 
These workers process all 2726 * work items regardless of their specific target workqueue. The only 2727 * exception is work items which belong to workqueues with a rescuer which 2728 * will be explained in rescuer_thread(). 2729 * 2730 * Return: 0 2731 */ 2732 static int worker_thread(void *__worker) 2733 { 2734 struct worker *worker = __worker; 2735 struct worker_pool *pool = worker->pool; 2736 2737 /* tell the scheduler that this is a workqueue worker */ 2738 set_pf_worker(true); 2739 woke_up: 2740 raw_spin_lock_irq(&pool->lock); 2741 2742 /* am I supposed to die? */ 2743 if (unlikely(worker->flags & WORKER_DIE)) { 2744 raw_spin_unlock_irq(&pool->lock); 2745 set_pf_worker(false); 2746 2747 set_task_comm(worker->task, "kworker/dying"); 2748 ida_free(&pool->worker_ida, worker->id); 2749 worker_detach_from_pool(worker); 2750 WARN_ON_ONCE(!list_empty(&worker->entry)); 2751 kfree(worker); 2752 return 0; 2753 } 2754 2755 worker_leave_idle(worker); 2756 recheck: 2757 /* no more worker necessary? */ 2758 if (!need_more_worker(pool)) 2759 goto sleep; 2760 2761 /* do we need to manage? */ 2762 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2763 goto recheck; 2764 2765 /* 2766 * ->scheduled list can only be filled while a worker is 2767 * preparing to process a work or actually processing it. 2768 * Make sure nobody diddled with it while I was sleeping. 2769 */ 2770 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2771 2772 /* 2773 * Finish PREP stage. We're guaranteed to have at least one idle 2774 * worker or that someone else has already assumed the manager 2775 * role. This is where @worker starts participating in concurrency 2776 * management if applicable and concurrency management is restored 2777 * after being rebound. See rebind_workers() for details. 2778 */ 2779 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2780 2781 do { 2782 struct work_struct *work = 2783 list_first_entry(&pool->worklist, 2784 struct work_struct, entry); 2785 2786 if (assign_work(work, worker, NULL)) 2787 process_scheduled_works(worker); 2788 } while (keep_working(pool)); 2789 2790 worker_set_flags(worker, WORKER_PREP); 2791 sleep: 2792 /* 2793 * pool->lock is held and there's no work to process and no need to 2794 * manage, sleep. Workers are woken up only while holding 2795 * pool->lock or from local cpu, so setting the current state 2796 * before releasing pool->lock is enough to prevent losing any 2797 * event. 2798 */ 2799 worker_enter_idle(worker); 2800 __set_current_state(TASK_IDLE); 2801 raw_spin_unlock_irq(&pool->lock); 2802 schedule(); 2803 goto woke_up; 2804 } 2805 2806 /** 2807 * rescuer_thread - the rescuer thread function 2808 * @__rescuer: self 2809 * 2810 * Workqueue rescuer thread function. There's one rescuer for each 2811 * workqueue which has WQ_MEM_RECLAIM set. 2812 * 2813 * Regular work processing on a pool may block trying to create a new 2814 * worker which uses GFP_KERNEL allocation which has slight chance of 2815 * developing into deadlock if some works currently on the same queue 2816 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2817 * the problem rescuer solves. 2818 * 2819 * When such condition is possible, the pool summons rescuers of all 2820 * workqueues which have works queued on the pool and let them process 2821 * those works so that forward progress can be guaranteed. 2822 * 2823 * This should happen rarely. 
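 *
 * A rescuer is created only for workqueues allocated with %WQ_MEM_RECLAIM,
 * e.g. (hypothetical name, sketch only):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;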
2824 * 2825 * Return: 0 2826 */ 2827 static int rescuer_thread(void *__rescuer) 2828 { 2829 struct worker *rescuer = __rescuer; 2830 struct workqueue_struct *wq = rescuer->rescue_wq; 2831 bool should_stop; 2832 2833 set_user_nice(current, RESCUER_NICE_LEVEL); 2834 2835 /* 2836 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2837 * doesn't participate in concurrency management. 2838 */ 2839 set_pf_worker(true); 2840 repeat: 2841 set_current_state(TASK_IDLE); 2842 2843 /* 2844 * By the time the rescuer is requested to stop, the workqueue 2845 * shouldn't have any work pending, but @wq->maydays may still have 2846 * pwq(s) queued. This can happen when non-rescuer workers consume 2847 * all the work items before the rescuer gets to them. Go through 2848 * @wq->maydays processing before acting on should_stop so that the 2849 * list is always empty on exit. 2850 */ 2851 should_stop = kthread_should_stop(); 2852 2853 /* see whether any pwq is asking for help */ 2854 raw_spin_lock_irq(&wq_mayday_lock); 2855 2856 while (!list_empty(&wq->maydays)) { 2857 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2858 struct pool_workqueue, mayday_node); 2859 struct worker_pool *pool = pwq->pool; 2860 struct work_struct *work, *n; 2861 2862 __set_current_state(TASK_RUNNING); 2863 list_del_init(&pwq->mayday_node); 2864 2865 raw_spin_unlock_irq(&wq_mayday_lock); 2866 2867 worker_attach_to_pool(rescuer, pool); 2868 2869 raw_spin_lock_irq(&pool->lock); 2870 2871 /* 2872 * Slurp in all works issued via this workqueue and 2873 * process'em. 2874 */ 2875 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2876 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2877 if (get_work_pwq(work) == pwq && 2878 assign_work(work, rescuer, &n)) 2879 pwq->stats[PWQ_STAT_RESCUED]++; 2880 } 2881 2882 if (!list_empty(&rescuer->scheduled)) { 2883 process_scheduled_works(rescuer); 2884 2885 /* 2886 * The above execution of rescued work items could 2887 * have created more to rescue through 2888 * pwq_activate_first_inactive() or chained 2889 * queueing. Let's put @pwq back on mayday list so 2890 * that such back-to-back work items, which may be 2891 * being used to relieve memory pressure, don't 2892 * incur MAYDAY_INTERVAL delay in between. 2893 */ 2894 if (pwq->nr_active && need_to_create_worker(pool)) { 2895 raw_spin_lock(&wq_mayday_lock); 2896 /* 2897 * Queue iff we aren't racing destruction 2898 * and somebody else hasn't queued it already. 2899 */ 2900 if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2901 get_pwq(pwq); 2902 list_add_tail(&pwq->mayday_node, &wq->maydays); 2903 } 2904 raw_spin_unlock(&wq_mayday_lock); 2905 } 2906 } 2907 2908 /* 2909 * Put the reference grabbed by send_mayday(). @pool won't 2910 * go away while we're still attached to it. 2911 */ 2912 put_pwq(pwq); 2913 2914 /* 2915 * Leave this pool. Notify regular workers; otherwise, we end up 2916 * with 0 concurrency and stalling the execution.
2917 */ 2918 kick_pool(pool); 2919 2920 raw_spin_unlock_irq(&pool->lock); 2921 2922 worker_detach_from_pool(rescuer); 2923 2924 raw_spin_lock_irq(&wq_mayday_lock); 2925 } 2926 2927 raw_spin_unlock_irq(&wq_mayday_lock); 2928 2929 if (should_stop) { 2930 __set_current_state(TASK_RUNNING); 2931 set_pf_worker(false); 2932 return 0; 2933 } 2934 2935 /* rescuers should never participate in concurrency management */ 2936 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2937 schedule(); 2938 goto repeat; 2939 } 2940 2941 /** 2942 * check_flush_dependency - check for flush dependency sanity 2943 * @target_wq: workqueue being flushed 2944 * @target_work: work item being flushed (NULL for workqueue flushes) 2945 * 2946 * %current is trying to flush the whole @target_wq or @target_work on it. 2947 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 2948 * reclaiming memory or running on a workqueue which doesn't have 2949 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 2950 * a deadlock. 2951 */ 2952 static void check_flush_dependency(struct workqueue_struct *target_wq, 2953 struct work_struct *target_work) 2954 { 2955 work_func_t target_func = target_work ? target_work->func : NULL; 2956 struct worker *worker; 2957 2958 if (target_wq->flags & WQ_MEM_RECLAIM) 2959 return; 2960 2961 worker = current_wq_worker(); 2962 2963 WARN_ONCE(current->flags & PF_MEMALLOC, 2964 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2965 current->pid, current->comm, target_wq->name, target_func); 2966 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 2967 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2968 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2969 worker->current_pwq->wq->name, worker->current_func, 2970 target_wq->name, target_func); 2971 } 2972 2973 struct wq_barrier { 2974 struct work_struct work; 2975 struct completion done; 2976 struct task_struct *task; /* purely informational */ 2977 }; 2978 2979 static void wq_barrier_func(struct work_struct *work) 2980 { 2981 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2982 complete(&barr->done); 2983 } 2984 2985 /** 2986 * insert_wq_barrier - insert a barrier work 2987 * @pwq: pwq to insert barrier into 2988 * @barr: wq_barrier to insert 2989 * @target: target work to attach @barr to 2990 * @worker: worker currently executing @target, NULL if @target is not executing 2991 * 2992 * @barr is linked to @target such that @barr is completed only after 2993 * @target finishes execution. Please note that the ordering 2994 * guarantee is observed only with respect to @target and on the local 2995 * cpu. 2996 * 2997 * Currently, a queued barrier can't be canceled. This is because 2998 * try_to_grab_pending() can't determine whether the work to be 2999 * grabbed is at the head of the queue and thus can't clear LINKED 3000 * flag of the previous work while there must be a valid next work 3001 * after a work with LINKED flag set. 3002 * 3003 * Note that when @worker is non-NULL, @target may be modified 3004 * underneath us, so we can't reliably determine pwq from @target. 3005 * 3006 * CONTEXT: 3007 * raw_spin_lock_irq(pool->lock). 
3008 */ 3009 static void insert_wq_barrier(struct pool_workqueue *pwq, 3010 struct wq_barrier *barr, 3011 struct work_struct *target, struct worker *worker) 3012 { 3013 unsigned int work_flags = 0; 3014 unsigned int work_color; 3015 struct list_head *head; 3016 3017 /* 3018 * debugobject calls are safe here even with pool->lock locked 3019 * as we know for sure that this will not trigger any of the 3020 * checks and call back into the fixup functions where we 3021 * might deadlock. 3022 */ 3023 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3024 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3025 3026 init_completion_map(&barr->done, &target->lockdep_map); 3027 3028 barr->task = current; 3029 3030 /* The barrier work item does not participate in pwq->nr_active. */ 3031 work_flags |= WORK_STRUCT_INACTIVE; 3032 3033 /* 3034 * If @target is currently being executed, schedule the 3035 * barrier to the worker; otherwise, put it after @target. 3036 */ 3037 if (worker) { 3038 head = worker->scheduled.next; 3039 work_color = worker->current_color; 3040 } else { 3041 unsigned long *bits = work_data_bits(target); 3042 3043 head = target->entry.next; 3044 /* there can already be other linked works, inherit and set */ 3045 work_flags |= *bits & WORK_STRUCT_LINKED; 3046 work_color = get_work_color(*bits); 3047 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3048 } 3049 3050 pwq->nr_in_flight[work_color]++; 3051 work_flags |= work_color_to_flags(work_color); 3052 3053 insert_work(pwq, &barr->work, head, work_flags); 3054 } 3055 3056 /** 3057 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3058 * @wq: workqueue being flushed 3059 * @flush_color: new flush color, < 0 for no-op 3060 * @work_color: new work color, < 0 for no-op 3061 * 3062 * Prepare pwqs for workqueue flushing. 3063 * 3064 * If @flush_color is non-negative, flush_color on all pwqs should be 3065 * -1. If no pwq has in-flight commands at the specified color, all 3066 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3067 * has in flight commands, its pwq->flush_color is set to 3068 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3069 * wakeup logic is armed and %true is returned. 3070 * 3071 * The caller should have initialized @wq->first_flusher prior to 3072 * calling this function with non-negative @flush_color. If 3073 * @flush_color is negative, no flush color update is done and %false 3074 * is returned. 3075 * 3076 * If @work_color is non-negative, all pwqs should have the same 3077 * work_color which is previous to @work_color and all will be 3078 * advanced to @work_color. 3079 * 3080 * CONTEXT: 3081 * mutex_lock(wq->mutex). 3082 * 3083 * Return: 3084 * %true if @flush_color >= 0 and there's something to flush. %false 3085 * otherwise. 
3086 */ 3087 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3088 int flush_color, int work_color) 3089 { 3090 bool wait = false; 3091 struct pool_workqueue *pwq; 3092 3093 if (flush_color >= 0) { 3094 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3095 atomic_set(&wq->nr_pwqs_to_flush, 1); 3096 } 3097 3098 for_each_pwq(pwq, wq) { 3099 struct worker_pool *pool = pwq->pool; 3100 3101 raw_spin_lock_irq(&pool->lock); 3102 3103 if (flush_color >= 0) { 3104 WARN_ON_ONCE(pwq->flush_color != -1); 3105 3106 if (pwq->nr_in_flight[flush_color]) { 3107 pwq->flush_color = flush_color; 3108 atomic_inc(&wq->nr_pwqs_to_flush); 3109 wait = true; 3110 } 3111 } 3112 3113 if (work_color >= 0) { 3114 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3115 pwq->work_color = work_color; 3116 } 3117 3118 raw_spin_unlock_irq(&pool->lock); 3119 } 3120 3121 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3122 complete(&wq->first_flusher->done); 3123 3124 return wait; 3125 } 3126 3127 /** 3128 * __flush_workqueue - ensure that any scheduled work has run to completion. 3129 * @wq: workqueue to flush 3130 * 3131 * This function sleeps until all work items which were queued on entry 3132 * have finished execution, but it is not livelocked by new incoming ones. 3133 */ 3134 void __flush_workqueue(struct workqueue_struct *wq) 3135 { 3136 struct wq_flusher this_flusher = { 3137 .list = LIST_HEAD_INIT(this_flusher.list), 3138 .flush_color = -1, 3139 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3140 }; 3141 int next_color; 3142 3143 if (WARN_ON(!wq_online)) 3144 return; 3145 3146 lock_map_acquire(&wq->lockdep_map); 3147 lock_map_release(&wq->lockdep_map); 3148 3149 mutex_lock(&wq->mutex); 3150 3151 /* 3152 * Start-to-wait phase 3153 */ 3154 next_color = work_next_color(wq->work_color); 3155 3156 if (next_color != wq->flush_color) { 3157 /* 3158 * Color space is not full. The current work_color 3159 * becomes our flush_color and work_color is advanced 3160 * by one. 3161 */ 3162 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3163 this_flusher.flush_color = wq->work_color; 3164 wq->work_color = next_color; 3165 3166 if (!wq->first_flusher) { 3167 /* no flush in progress, become the first flusher */ 3168 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3169 3170 wq->first_flusher = &this_flusher; 3171 3172 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3173 wq->work_color)) { 3174 /* nothing to flush, done */ 3175 wq->flush_color = next_color; 3176 wq->first_flusher = NULL; 3177 goto out_unlock; 3178 } 3179 } else { 3180 /* wait in queue */ 3181 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3182 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3183 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3184 } 3185 } else { 3186 /* 3187 * Oops, color space is full, wait on overflow queue. 3188 * The next flush completion will assign us 3189 * flush_color and transfer to flusher_queue. 3190 */ 3191 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3192 } 3193 3194 check_flush_dependency(wq, NULL); 3195 3196 mutex_unlock(&wq->mutex); 3197 3198 wait_for_completion(&this_flusher.done); 3199 3200 /* 3201 * Wake-up-and-cascade phase 3202 * 3203 * First flushers are responsible for cascading flushes and 3204 * handling overflow. Non-first flushers can simply return. 
3205 */ 3206 if (READ_ONCE(wq->first_flusher) != &this_flusher) 3207 return; 3208 3209 mutex_lock(&wq->mutex); 3210 3211 /* we might have raced, check again with mutex held */ 3212 if (wq->first_flusher != &this_flusher) 3213 goto out_unlock; 3214 3215 WRITE_ONCE(wq->first_flusher, NULL); 3216 3217 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3218 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3219 3220 while (true) { 3221 struct wq_flusher *next, *tmp; 3222 3223 /* complete all the flushers sharing the current flush color */ 3224 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3225 if (next->flush_color != wq->flush_color) 3226 break; 3227 list_del_init(&next->list); 3228 complete(&next->done); 3229 } 3230 3231 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 3232 wq->flush_color != work_next_color(wq->work_color)); 3233 3234 /* this flush_color is finished, advance by one */ 3235 wq->flush_color = work_next_color(wq->flush_color); 3236 3237 /* one color has been freed, handle overflow queue */ 3238 if (!list_empty(&wq->flusher_overflow)) { 3239 /* 3240 * Assign the same color to all overflowed 3241 * flushers, advance work_color and append to 3242 * flusher_queue. This is the start-to-wait 3243 * phase for these overflowed flushers. 3244 */ 3245 list_for_each_entry(tmp, &wq->flusher_overflow, list) 3246 tmp->flush_color = wq->work_color; 3247 3248 wq->work_color = work_next_color(wq->work_color); 3249 3250 list_splice_tail_init(&wq->flusher_overflow, 3251 &wq->flusher_queue); 3252 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3253 } 3254 3255 if (list_empty(&wq->flusher_queue)) { 3256 WARN_ON_ONCE(wq->flush_color != wq->work_color); 3257 break; 3258 } 3259 3260 /* 3261 * Need to flush more colors. Make the next flusher 3262 * the new first flusher and arm pwqs. 3263 */ 3264 WARN_ON_ONCE(wq->flush_color == wq->work_color); 3265 WARN_ON_ONCE(wq->flush_color != next->flush_color); 3266 3267 list_del_init(&next->list); 3268 wq->first_flusher = next; 3269 3270 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 3271 break; 3272 3273 /* 3274 * Meh... this color is already done, clear first 3275 * flusher and repeat cascading. 3276 */ 3277 wq->first_flusher = NULL; 3278 } 3279 3280 out_unlock: 3281 mutex_unlock(&wq->mutex); 3282 } 3283 EXPORT_SYMBOL(__flush_workqueue); 3284 3285 /** 3286 * drain_workqueue - drain a workqueue 3287 * @wq: workqueue to drain 3288 * 3289 * Wait until the workqueue becomes empty. While draining is in progress, 3290 * only chain queueing is allowed. IOW, only currently pending or running 3291 * work items on @wq can queue further work items on it. @wq is flushed 3292 * repeatedly until it becomes empty. The number of flushing is determined 3293 * by the depth of chaining and should be relatively short. Whine if it 3294 * takes too long. 3295 */ 3296 void drain_workqueue(struct workqueue_struct *wq) 3297 { 3298 unsigned int flush_cnt = 0; 3299 struct pool_workqueue *pwq; 3300 3301 /* 3302 * __queue_work() needs to test whether there are drainers, is much 3303 * hotter than drain_workqueue() and already looks at @wq->flags. 3304 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 
3305 */ 3306 mutex_lock(&wq->mutex); 3307 if (!wq->nr_drainers++) 3308 wq->flags |= __WQ_DRAINING; 3309 mutex_unlock(&wq->mutex); 3310 reflush: 3311 __flush_workqueue(wq); 3312 3313 mutex_lock(&wq->mutex); 3314 3315 for_each_pwq(pwq, wq) { 3316 bool drained; 3317 3318 raw_spin_lock_irq(&pwq->pool->lock); 3319 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); 3320 raw_spin_unlock_irq(&pwq->pool->lock); 3321 3322 if (drained) 3323 continue; 3324 3325 if (++flush_cnt == 10 || 3326 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3327 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3328 wq->name, __func__, flush_cnt); 3329 3330 mutex_unlock(&wq->mutex); 3331 goto reflush; 3332 } 3333 3334 if (!--wq->nr_drainers) 3335 wq->flags &= ~__WQ_DRAINING; 3336 mutex_unlock(&wq->mutex); 3337 } 3338 EXPORT_SYMBOL_GPL(drain_workqueue); 3339 3340 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3341 bool from_cancel) 3342 { 3343 struct worker *worker = NULL; 3344 struct worker_pool *pool; 3345 struct pool_workqueue *pwq; 3346 3347 might_sleep(); 3348 3349 rcu_read_lock(); 3350 pool = get_work_pool(work); 3351 if (!pool) { 3352 rcu_read_unlock(); 3353 return false; 3354 } 3355 3356 raw_spin_lock_irq(&pool->lock); 3357 /* see the comment in try_to_grab_pending() with the same code */ 3358 pwq = get_work_pwq(work); 3359 if (pwq) { 3360 if (unlikely(pwq->pool != pool)) 3361 goto already_gone; 3362 } else { 3363 worker = find_worker_executing_work(pool, work); 3364 if (!worker) 3365 goto already_gone; 3366 pwq = worker->current_pwq; 3367 } 3368 3369 check_flush_dependency(pwq->wq, work); 3370 3371 insert_wq_barrier(pwq, barr, work, worker); 3372 raw_spin_unlock_irq(&pool->lock); 3373 3374 /* 3375 * Force a lock recursion deadlock when using flush_work() inside a 3376 * single-threaded or rescuer equipped workqueue. 3377 * 3378 * For single threaded workqueues the deadlock happens when the work 3379 * is after the work issuing the flush_work(). For rescuer equipped 3380 * workqueues the deadlock happens when the rescuer stalls, blocking 3381 * forward progress. 3382 */ 3383 if (!from_cancel && 3384 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3385 lock_map_acquire(&pwq->wq->lockdep_map); 3386 lock_map_release(&pwq->wq->lockdep_map); 3387 } 3388 rcu_read_unlock(); 3389 return true; 3390 already_gone: 3391 raw_spin_unlock_irq(&pool->lock); 3392 rcu_read_unlock(); 3393 return false; 3394 } 3395 3396 static bool __flush_work(struct work_struct *work, bool from_cancel) 3397 { 3398 struct wq_barrier barr; 3399 3400 if (WARN_ON(!wq_online)) 3401 return false; 3402 3403 if (WARN_ON(!work->func)) 3404 return false; 3405 3406 lock_map_acquire(&work->lockdep_map); 3407 lock_map_release(&work->lockdep_map); 3408 3409 if (start_flush_work(work, &barr, from_cancel)) { 3410 wait_for_completion(&barr.done); 3411 destroy_work_on_stack(&barr.work); 3412 return true; 3413 } else { 3414 return false; 3415 } 3416 } 3417 3418 /** 3419 * flush_work - wait for a work to finish executing the last queueing instance 3420 * @work: the work to flush 3421 * 3422 * Wait until @work has finished execution. @work is guaranteed to be idle 3423 * on return if it hasn't been requeued since flush started. 3424 * 3425 * Return: 3426 * %true if flush_work() waited for the work to finish execution, 3427 * %false if it was already idle. 
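 *
 * Typical usage (ctx and its refresh_work item are hypothetical, sketch
 * only): make sure an earlier queueing has finished before freeing its
 * context:
 *
 *	schedule_work(&ctx->refresh_work);
 *	...
 *	flush_work(&ctx->refresh_work);
 *	kfree(ctx);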
3428 */ 3429 bool flush_work(struct work_struct *work) 3430 { 3431 return __flush_work(work, false); 3432 } 3433 EXPORT_SYMBOL_GPL(flush_work); 3434 3435 struct cwt_wait { 3436 wait_queue_entry_t wait; 3437 struct work_struct *work; 3438 }; 3439 3440 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 3441 { 3442 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 3443 3444 if (cwait->work != key) 3445 return 0; 3446 return autoremove_wake_function(wait, mode, sync, key); 3447 } 3448 3449 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3450 { 3451 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3452 unsigned long flags; 3453 int ret; 3454 3455 do { 3456 ret = try_to_grab_pending(work, is_dwork, &flags); 3457 /* 3458 * If someone else is already canceling, wait for it to 3459 * finish. flush_work() doesn't work for PREEMPT_NONE 3460 * because we may get scheduled between @work's completion 3461 * and the other canceling task resuming and clearing 3462 * CANCELING - flush_work() will return false immediately 3463 * as @work is no longer busy, try_to_grab_pending() will 3464 * return -ENOENT as @work is still being canceled and the 3465 * other canceling task won't be able to clear CANCELING as 3466 * we're hogging the CPU. 3467 * 3468 * Let's wait for completion using a waitqueue. As this 3469 * may lead to the thundering herd problem, use a custom 3470 * wake function which matches @work along with exclusive 3471 * wait and wakeup. 3472 */ 3473 if (unlikely(ret == -ENOENT)) { 3474 struct cwt_wait cwait; 3475 3476 init_wait(&cwait.wait); 3477 cwait.wait.func = cwt_wakefn; 3478 cwait.work = work; 3479 3480 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 3481 TASK_UNINTERRUPTIBLE); 3482 if (work_is_canceling(work)) 3483 schedule(); 3484 finish_wait(&cancel_waitq, &cwait.wait); 3485 } 3486 } while (unlikely(ret < 0)); 3487 3488 /* tell other tasks trying to grab @work to back off */ 3489 mark_work_canceling(work); 3490 local_irq_restore(flags); 3491 3492 /* 3493 * This allows canceling during early boot. We know that @work 3494 * isn't executing. 3495 */ 3496 if (wq_online) 3497 __flush_work(work, true); 3498 3499 clear_work_data(work); 3500 3501 /* 3502 * Paired with prepare_to_wait() above so that either 3503 * waitqueue_active() is visible here or !work_is_canceling() is 3504 * visible there. 3505 */ 3506 smp_mb(); 3507 if (waitqueue_active(&cancel_waitq)) 3508 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 3509 3510 return ret; 3511 } 3512 3513 /** 3514 * cancel_work_sync - cancel a work and wait for it to finish 3515 * @work: the work to cancel 3516 * 3517 * Cancel @work and wait for its execution to finish. This function 3518 * can be used even if the work re-queues itself or migrates to 3519 * another workqueue. On return from this function, @work is 3520 * guaranteed to be not pending or executing on any CPU. 3521 * 3522 * cancel_work_sync(&delayed_work->work) must not be used for 3523 * delayed_work's. Use cancel_delayed_work_sync() instead. 3524 * 3525 * The caller must ensure that the workqueue on which @work was last 3526 * queued can't be destroyed before this function returns. 3527 * 3528 * Return: 3529 * %true if @work was pending, %false otherwise. 
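 *
 * A common teardown sketch (priv and tx_work are hypothetical fields,
 * illustration only): stop the work before releasing the memory it uses:
 *
 *	cancel_work_sync(&priv->tx_work);
 *	kfree(priv);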
3530 */ 3531 bool cancel_work_sync(struct work_struct *work) 3532 { 3533 return __cancel_work_timer(work, false); 3534 } 3535 EXPORT_SYMBOL_GPL(cancel_work_sync); 3536 3537 /** 3538 * flush_delayed_work - wait for a dwork to finish executing the last queueing 3539 * @dwork: the delayed work to flush 3540 * 3541 * Delayed timer is cancelled and the pending work is queued for 3542 * immediate execution. Like flush_work(), this function only 3543 * considers the last queueing instance of @dwork. 3544 * 3545 * Return: 3546 * %true if flush_work() waited for the work to finish execution, 3547 * %false if it was already idle. 3548 */ 3549 bool flush_delayed_work(struct delayed_work *dwork) 3550 { 3551 local_irq_disable(); 3552 if (del_timer_sync(&dwork->timer)) 3553 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 3554 local_irq_enable(); 3555 return flush_work(&dwork->work); 3556 } 3557 EXPORT_SYMBOL(flush_delayed_work); 3558 3559 /** 3560 * flush_rcu_work - wait for a rwork to finish executing the last queueing 3561 * @rwork: the rcu work to flush 3562 * 3563 * Return: 3564 * %true if flush_rcu_work() waited for the work to finish execution, 3565 * %false if it was already idle. 3566 */ 3567 bool flush_rcu_work(struct rcu_work *rwork) 3568 { 3569 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 3570 rcu_barrier(); 3571 flush_work(&rwork->work); 3572 return true; 3573 } else { 3574 return flush_work(&rwork->work); 3575 } 3576 } 3577 EXPORT_SYMBOL(flush_rcu_work); 3578 3579 static bool __cancel_work(struct work_struct *work, bool is_dwork) 3580 { 3581 unsigned long flags; 3582 int ret; 3583 3584 do { 3585 ret = try_to_grab_pending(work, is_dwork, &flags); 3586 } while (unlikely(ret == -EAGAIN)); 3587 3588 if (unlikely(ret < 0)) 3589 return false; 3590 3591 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3592 local_irq_restore(flags); 3593 return ret; 3594 } 3595 3596 /* 3597 * See cancel_delayed_work() 3598 */ 3599 bool cancel_work(struct work_struct *work) 3600 { 3601 return __cancel_work(work, false); 3602 } 3603 EXPORT_SYMBOL(cancel_work); 3604 3605 /** 3606 * cancel_delayed_work - cancel a delayed work 3607 * @dwork: delayed_work to cancel 3608 * 3609 * Kill off a pending delayed_work. 3610 * 3611 * Return: %true if @dwork was pending and canceled; %false if it wasn't 3612 * pending. 3613 * 3614 * Note: 3615 * The work callback function may still be running on return, unless 3616 * this function returns %true and the work doesn't re-arm itself. Explicitly flush or 3617 * use cancel_delayed_work_sync() to wait on it. 3618 * 3619 * This function is safe to call from any context including IRQ handler. 3620 */ 3621 bool cancel_delayed_work(struct delayed_work *dwork) 3622 { 3623 return __cancel_work(&dwork->work, true); 3624 } 3625 EXPORT_SYMBOL(cancel_delayed_work); 3626 3627 /** 3628 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3629 * @dwork: the delayed work to cancel 3630 * 3631 * This is cancel_work_sync() for delayed works. 3632 * 3633 * Return: 3634 * %true if @dwork was pending, %false otherwise.
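 *
 * For illustration only (the polling structure is hypothetical): a periodic,
 * self-rearming delayed work is typically stopped this way before its data
 * is torn down:
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);
 *
 * On return the work is neither pending nor executing, even if the callback
 * rearms itself, per the cancel_work_sync() semantics above.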
3635 */ 3636 bool cancel_delayed_work_sync(struct delayed_work *dwork) 3637 { 3638 return __cancel_work_timer(&dwork->work, true); 3639 } 3640 EXPORT_SYMBOL(cancel_delayed_work_sync); 3641 3642 /** 3643 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3644 * @func: the function to call 3645 * 3646 * schedule_on_each_cpu() executes @func on each online CPU using the 3647 * system workqueue and blocks until all CPUs have completed. 3648 * schedule_on_each_cpu() is very slow. 3649 * 3650 * Return: 3651 * 0 on success, -errno on failure. 3652 */ 3653 int schedule_on_each_cpu(work_func_t func) 3654 { 3655 int cpu; 3656 struct work_struct __percpu *works; 3657 3658 works = alloc_percpu(struct work_struct); 3659 if (!works) 3660 return -ENOMEM; 3661 3662 cpus_read_lock(); 3663 3664 for_each_online_cpu(cpu) { 3665 struct work_struct *work = per_cpu_ptr(works, cpu); 3666 3667 INIT_WORK(work, func); 3668 schedule_work_on(cpu, work); 3669 } 3670 3671 for_each_online_cpu(cpu) 3672 flush_work(per_cpu_ptr(works, cpu)); 3673 3674 cpus_read_unlock(); 3675 free_percpu(works); 3676 return 0; 3677 } 3678 3679 /** 3680 * execute_in_process_context - reliably execute the routine with user context 3681 * @fn: the function to execute 3682 * @ew: guaranteed storage for the execute work structure (must 3683 * be available when the work executes) 3684 * 3685 * Executes the function immediately if process context is available, 3686 * otherwise schedules the function for delayed execution. 3687 * 3688 * Return: 0 - function was executed 3689 * 1 - function was scheduled for execution 3690 */ 3691 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3692 { 3693 if (!in_interrupt()) { 3694 fn(&ew->work); 3695 return 0; 3696 } 3697 3698 INIT_WORK(&ew->work, fn); 3699 schedule_work(&ew->work); 3700 3701 return 1; 3702 } 3703 EXPORT_SYMBOL_GPL(execute_in_process_context); 3704 3705 /** 3706 * free_workqueue_attrs - free a workqueue_attrs 3707 * @attrs: workqueue_attrs to free 3708 * 3709 * Undo alloc_workqueue_attrs(). 3710 */ 3711 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3712 { 3713 if (attrs) { 3714 free_cpumask_var(attrs->cpumask); 3715 free_cpumask_var(attrs->__pod_cpumask); 3716 kfree(attrs); 3717 } 3718 } 3719 3720 /** 3721 * alloc_workqueue_attrs - allocate a workqueue_attrs 3722 * 3723 * Allocate a new workqueue_attrs, initialize with default settings and 3724 * return it. 3725 * 3726 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3727 */ 3728 struct workqueue_attrs *alloc_workqueue_attrs(void) 3729 { 3730 struct workqueue_attrs *attrs; 3731 3732 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 3733 if (!attrs) 3734 goto fail; 3735 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 3736 goto fail; 3737 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 3738 goto fail; 3739 3740 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3741 attrs->affn_scope = WQ_AFFN_DFL; 3742 return attrs; 3743 fail: 3744 free_workqueue_attrs(attrs); 3745 return NULL; 3746 } 3747 3748 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3749 const struct workqueue_attrs *from) 3750 { 3751 to->nice = from->nice; 3752 cpumask_copy(to->cpumask, from->cpumask); 3753 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 3754 to->affn_strict = from->affn_strict; 3755 3756 /* 3757 * Unlike hash and equality test, copying shouldn't ignore wq-only 3758 * fields as copying is used for both pool and wq attrs. 
Instead, 3759 * get_unbound_pool() explicitly clears the fields. 3760 */ 3761 to->affn_scope = from->affn_scope; 3762 to->ordered = from->ordered; 3763 } 3764 3765 /* 3766 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 3767 * comments in 'struct workqueue_attrs' definition. 3768 */ 3769 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 3770 { 3771 attrs->affn_scope = WQ_AFFN_NR_TYPES; 3772 attrs->ordered = false; 3773 } 3774 3775 /* hash value of the content of @attr */ 3776 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3777 { 3778 u32 hash = 0; 3779 3780 hash = jhash_1word(attrs->nice, hash); 3781 hash = jhash(cpumask_bits(attrs->cpumask), 3782 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3783 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 3784 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3785 hash = jhash_1word(attrs->affn_strict, hash); 3786 return hash; 3787 } 3788 3789 /* content equality test */ 3790 static bool wqattrs_equal(const struct workqueue_attrs *a, 3791 const struct workqueue_attrs *b) 3792 { 3793 if (a->nice != b->nice) 3794 return false; 3795 if (!cpumask_equal(a->cpumask, b->cpumask)) 3796 return false; 3797 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 3798 return false; 3799 if (a->affn_strict != b->affn_strict) 3800 return false; 3801 return true; 3802 } 3803 3804 /* Update @attrs with actually available CPUs */ 3805 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 3806 const cpumask_t *unbound_cpumask) 3807 { 3808 /* 3809 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 3810 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 3811 * @unbound_cpumask. 3812 */ 3813 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 3814 if (unlikely(cpumask_empty(attrs->cpumask))) 3815 cpumask_copy(attrs->cpumask, unbound_cpumask); 3816 } 3817 3818 /* find wq_pod_type to use for @attrs */ 3819 static const struct wq_pod_type * 3820 wqattrs_pod_type(const struct workqueue_attrs *attrs) 3821 { 3822 enum wq_affn_scope scope; 3823 struct wq_pod_type *pt; 3824 3825 /* to synchronize access to wq_affn_dfl */ 3826 lockdep_assert_held(&wq_pool_mutex); 3827 3828 if (attrs->affn_scope == WQ_AFFN_DFL) 3829 scope = wq_affn_dfl; 3830 else 3831 scope = attrs->affn_scope; 3832 3833 pt = &wq_pod_types[scope]; 3834 3835 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 3836 likely(pt->nr_pods)) 3837 return pt; 3838 3839 /* 3840 * Before workqueue_init_topology(), only SYSTEM is available which is 3841 * initialized in workqueue_init_early(). 3842 */ 3843 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 3844 BUG_ON(!pt->nr_pods); 3845 return pt; 3846 } 3847 3848 /** 3849 * init_worker_pool - initialize a newly zalloc'd worker_pool 3850 * @pool: worker_pool to initialize 3851 * 3852 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3853 * 3854 * Return: 0 on success, -errno on failure. Even on failure, all fields 3855 * inside @pool proper are initialized and put_unbound_pool() can be called 3856 * on @pool safely to release it. 
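 *
 * A sketch of the error-handling contract described above, mirroring how
 * get_unbound_pool() below uses it with wq_pool_mutex held (abbreviated,
 * not a literal copy):
 *
 *	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
 *	if (!pool || init_worker_pool(pool) < 0)
 *		goto fail;
 *	return pool;
 * fail:
 *	if (pool)
 *		put_unbound_pool(pool);
 *	return NULL;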
3857 */ 3858 static int init_worker_pool(struct worker_pool *pool) 3859 { 3860 raw_spin_lock_init(&pool->lock); 3861 pool->id = -1; 3862 pool->cpu = -1; 3863 pool->node = NUMA_NO_NODE; 3864 pool->flags |= POOL_DISASSOCIATED; 3865 pool->watchdog_ts = jiffies; 3866 INIT_LIST_HEAD(&pool->worklist); 3867 INIT_LIST_HEAD(&pool->idle_list); 3868 hash_init(pool->busy_hash); 3869 3870 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3871 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 3872 3873 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3874 3875 INIT_LIST_HEAD(&pool->workers); 3876 INIT_LIST_HEAD(&pool->dying_workers); 3877 3878 ida_init(&pool->worker_ida); 3879 INIT_HLIST_NODE(&pool->hash_node); 3880 pool->refcnt = 1; 3881 3882 /* shouldn't fail above this point */ 3883 pool->attrs = alloc_workqueue_attrs(); 3884 if (!pool->attrs) 3885 return -ENOMEM; 3886 3887 wqattrs_clear_for_pool(pool->attrs); 3888 3889 return 0; 3890 } 3891 3892 #ifdef CONFIG_LOCKDEP 3893 static void wq_init_lockdep(struct workqueue_struct *wq) 3894 { 3895 char *lock_name; 3896 3897 lockdep_register_key(&wq->key); 3898 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3899 if (!lock_name) 3900 lock_name = wq->name; 3901 3902 wq->lock_name = lock_name; 3903 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3904 } 3905 3906 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3907 { 3908 lockdep_unregister_key(&wq->key); 3909 } 3910 3911 static void wq_free_lockdep(struct workqueue_struct *wq) 3912 { 3913 if (wq->lock_name != wq->name) 3914 kfree(wq->lock_name); 3915 } 3916 #else 3917 static void wq_init_lockdep(struct workqueue_struct *wq) 3918 { 3919 } 3920 3921 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3922 { 3923 } 3924 3925 static void wq_free_lockdep(struct workqueue_struct *wq) 3926 { 3927 } 3928 #endif 3929 3930 static void rcu_free_wq(struct rcu_head *rcu) 3931 { 3932 struct workqueue_struct *wq = 3933 container_of(rcu, struct workqueue_struct, rcu); 3934 3935 wq_free_lockdep(wq); 3936 free_percpu(wq->cpu_pwq); 3937 free_workqueue_attrs(wq->unbound_attrs); 3938 kfree(wq); 3939 } 3940 3941 static void rcu_free_pool(struct rcu_head *rcu) 3942 { 3943 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3944 3945 ida_destroy(&pool->worker_ida); 3946 free_workqueue_attrs(pool->attrs); 3947 kfree(pool); 3948 } 3949 3950 /** 3951 * put_unbound_pool - put a worker_pool 3952 * @pool: worker_pool to put 3953 * 3954 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 3955 * safe manner. get_unbound_pool() calls this function on its failure path 3956 * and this function should be able to release pools which went through, 3957 * successfully or not, init_worker_pool(). 3958 * 3959 * Should be called with wq_pool_mutex held. 3960 */ 3961 static void put_unbound_pool(struct worker_pool *pool) 3962 { 3963 DECLARE_COMPLETION_ONSTACK(detach_completion); 3964 struct worker *worker; 3965 LIST_HEAD(cull_list); 3966 3967 lockdep_assert_held(&wq_pool_mutex); 3968 3969 if (--pool->refcnt) 3970 return; 3971 3972 /* sanity checks */ 3973 if (WARN_ON(!(pool->cpu < 0)) || 3974 WARN_ON(!list_empty(&pool->worklist))) 3975 return; 3976 3977 /* release id and unhash */ 3978 if (pool->id >= 0) 3979 idr_remove(&worker_pool_idr, pool->id); 3980 hash_del(&pool->hash_node); 3981 3982 /* 3983 * Become the manager and destroy all workers. This prevents 3984 * @pool's workers from blocking on attach_mutex. 
We're the last 3985 * manager and @pool gets freed with the flag set. 3986 * 3987 * Having a concurrent manager is quite unlikely to happen as we can 3988 * only get here with 3989 * pwq->refcnt == pool->refcnt == 0 3990 * which implies no work queued to the pool, which implies no worker can 3991 * become the manager. However a worker could have taken the role of 3992 * manager before the refcnts dropped to 0, since maybe_create_worker() 3993 * drops pool->lock 3994 */ 3995 while (true) { 3996 rcuwait_wait_event(&manager_wait, 3997 !(pool->flags & POOL_MANAGER_ACTIVE), 3998 TASK_UNINTERRUPTIBLE); 3999 4000 mutex_lock(&wq_pool_attach_mutex); 4001 raw_spin_lock_irq(&pool->lock); 4002 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4003 pool->flags |= POOL_MANAGER_ACTIVE; 4004 break; 4005 } 4006 raw_spin_unlock_irq(&pool->lock); 4007 mutex_unlock(&wq_pool_attach_mutex); 4008 } 4009 4010 while ((worker = first_idle_worker(pool))) 4011 set_worker_dying(worker, &cull_list); 4012 WARN_ON(pool->nr_workers || pool->nr_idle); 4013 raw_spin_unlock_irq(&pool->lock); 4014 4015 wake_dying_workers(&cull_list); 4016 4017 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4018 pool->detach_completion = &detach_completion; 4019 mutex_unlock(&wq_pool_attach_mutex); 4020 4021 if (pool->detach_completion) 4022 wait_for_completion(pool->detach_completion); 4023 4024 /* shut down the timers */ 4025 del_timer_sync(&pool->idle_timer); 4026 cancel_work_sync(&pool->idle_cull_work); 4027 del_timer_sync(&pool->mayday_timer); 4028 4029 /* RCU protected to allow dereferences from get_work_pool() */ 4030 call_rcu(&pool->rcu, rcu_free_pool); 4031 } 4032 4033 /** 4034 * get_unbound_pool - get a worker_pool with the specified attributes 4035 * @attrs: the attributes of the worker_pool to get 4036 * 4037 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4038 * reference count and return it. If there already is a matching 4039 * worker_pool, it will be used; otherwise, this function attempts to 4040 * create a new one. 4041 * 4042 * Should be called with wq_pool_mutex held. 4043 * 4044 * Return: On success, a worker_pool with the same attributes as @attrs. 4045 * On failure, %NULL. 4046 */ 4047 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4048 { 4049 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4050 u32 hash = wqattrs_hash(attrs); 4051 struct worker_pool *pool; 4052 int pod, node = NUMA_NO_NODE; 4053 4054 lockdep_assert_held(&wq_pool_mutex); 4055 4056 /* do we already have a matching pool? 
*/ 4057 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4058 if (wqattrs_equal(pool->attrs, attrs)) { 4059 pool->refcnt++; 4060 return pool; 4061 } 4062 } 4063 4064 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4065 for (pod = 0; pod < pt->nr_pods; pod++) { 4066 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4067 node = pt->pod_node[pod]; 4068 break; 4069 } 4070 } 4071 4072 /* nope, create a new one */ 4073 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4074 if (!pool || init_worker_pool(pool) < 0) 4075 goto fail; 4076 4077 pool->node = node; 4078 copy_workqueue_attrs(pool->attrs, attrs); 4079 wqattrs_clear_for_pool(pool->attrs); 4080 4081 if (worker_pool_assign_id(pool) < 0) 4082 goto fail; 4083 4084 /* create and start the initial worker */ 4085 if (wq_online && !create_worker(pool)) 4086 goto fail; 4087 4088 /* install */ 4089 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4090 4091 return pool; 4092 fail: 4093 if (pool) 4094 put_unbound_pool(pool); 4095 return NULL; 4096 } 4097 4098 static void rcu_free_pwq(struct rcu_head *rcu) 4099 { 4100 kmem_cache_free(pwq_cache, 4101 container_of(rcu, struct pool_workqueue, rcu)); 4102 } 4103 4104 /* 4105 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4106 * refcnt and needs to be destroyed. 4107 */ 4108 static void pwq_release_workfn(struct kthread_work *work) 4109 { 4110 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4111 release_work); 4112 struct workqueue_struct *wq = pwq->wq; 4113 struct worker_pool *pool = pwq->pool; 4114 bool is_last = false; 4115 4116 /* 4117 * When @pwq is not linked, it doesn't hold any reference to the 4118 * @wq, and @wq is invalid to access. 4119 */ 4120 if (!list_empty(&pwq->pwqs_node)) { 4121 mutex_lock(&wq->mutex); 4122 list_del_rcu(&pwq->pwqs_node); 4123 is_last = list_empty(&wq->pwqs); 4124 mutex_unlock(&wq->mutex); 4125 } 4126 4127 if (wq->flags & WQ_UNBOUND) { 4128 mutex_lock(&wq_pool_mutex); 4129 put_unbound_pool(pool); 4130 mutex_unlock(&wq_pool_mutex); 4131 } 4132 4133 call_rcu(&pwq->rcu, rcu_free_pwq); 4134 4135 /* 4136 * If we're the last pwq going away, @wq is already dead and no one 4137 * is gonna access it anymore. Schedule RCU free. 4138 */ 4139 if (is_last) { 4140 wq_unregister_lockdep(wq); 4141 call_rcu(&wq->rcu, rcu_free_wq); 4142 } 4143 } 4144 4145 /** 4146 * pwq_adjust_max_active - update a pwq's max_active to the current setting 4147 * @pwq: target pool_workqueue 4148 * 4149 * If @pwq isn't freezing, set @pwq->max_active to the associated 4150 * workqueue's saved_max_active and activate inactive work items 4151 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 4152 */ 4153 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 4154 { 4155 struct workqueue_struct *wq = pwq->wq; 4156 bool freezable = wq->flags & WQ_FREEZABLE; 4157 unsigned long flags; 4158 4159 /* for @wq->saved_max_active */ 4160 lockdep_assert_held(&wq->mutex); 4161 4162 /* fast exit for non-freezable wqs */ 4163 if (!freezable && pwq->max_active == wq->saved_max_active) 4164 return; 4165 4166 /* this function can be called during early boot w/ irq disabled */ 4167 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4168 4169 /* 4170 * During [un]freezing, the caller is responsible for ensuring that 4171 * this function is called at least once after @workqueue_freezing 4172 * is updated and visible. 
4173 */ 4174 if (!freezable || !workqueue_freezing) { 4175 pwq->max_active = wq->saved_max_active; 4176 4177 while (!list_empty(&pwq->inactive_works) && 4178 pwq->nr_active < pwq->max_active) 4179 pwq_activate_first_inactive(pwq); 4180 4181 kick_pool(pwq->pool); 4182 } else { 4183 pwq->max_active = 0; 4184 } 4185 4186 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4187 } 4188 4189 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4190 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4191 struct worker_pool *pool) 4192 { 4193 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4194 4195 memset(pwq, 0, sizeof(*pwq)); 4196 4197 pwq->pool = pool; 4198 pwq->wq = wq; 4199 pwq->flush_color = -1; 4200 pwq->refcnt = 1; 4201 INIT_LIST_HEAD(&pwq->inactive_works); 4202 INIT_LIST_HEAD(&pwq->pwqs_node); 4203 INIT_LIST_HEAD(&pwq->mayday_node); 4204 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4205 } 4206 4207 /* sync @pwq with the current state of its associated wq and link it */ 4208 static void link_pwq(struct pool_workqueue *pwq) 4209 { 4210 struct workqueue_struct *wq = pwq->wq; 4211 4212 lockdep_assert_held(&wq->mutex); 4213 4214 /* may be called multiple times, ignore if already linked */ 4215 if (!list_empty(&pwq->pwqs_node)) 4216 return; 4217 4218 /* set the matching work_color */ 4219 pwq->work_color = wq->work_color; 4220 4221 /* sync max_active to the current setting */ 4222 pwq_adjust_max_active(pwq); 4223 4224 /* link in @pwq */ 4225 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4226 } 4227 4228 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4229 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4230 const struct workqueue_attrs *attrs) 4231 { 4232 struct worker_pool *pool; 4233 struct pool_workqueue *pwq; 4234 4235 lockdep_assert_held(&wq_pool_mutex); 4236 4237 pool = get_unbound_pool(attrs); 4238 if (!pool) 4239 return NULL; 4240 4241 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4242 if (!pwq) { 4243 put_unbound_pool(pool); 4244 return NULL; 4245 } 4246 4247 init_pwq(pwq, wq, pool); 4248 return pwq; 4249 } 4250 4251 /** 4252 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4253 * @attrs: the wq_attrs of the default pwq of the target workqueue 4254 * @cpu: the target CPU 4255 * @cpu_going_down: if >= 0, the CPU to consider as offline 4256 * 4257 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4258 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 4259 * The result is stored in @attrs->__pod_cpumask. 4260 * 4261 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4262 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4263 * intersection of the possible CPUs of @pod and @attrs->cpumask. 4264 * 4265 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 4266 */ 4267 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 4268 int cpu_going_down) 4269 { 4270 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 4271 int pod = pt->cpu_pod[cpu]; 4272 4273 /* does @pod have any online CPUs @attrs wants? 
*/ 4274 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 4275 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 4276 if (cpu_going_down >= 0) 4277 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 4278 4279 if (cpumask_empty(attrs->__pod_cpumask)) { 4280 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 4281 return; 4282 } 4283 4284 /* yeap, return possible CPUs in @pod that @attrs wants */ 4285 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 4286 4287 if (cpumask_empty(attrs->__pod_cpumask)) 4288 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 4289 "possible intersect\n"); 4290 } 4291 4292 /* install @pwq into @wq's cpu_pwq and return the old pwq */ 4293 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4294 int cpu, struct pool_workqueue *pwq) 4295 { 4296 struct pool_workqueue *old_pwq; 4297 4298 lockdep_assert_held(&wq_pool_mutex); 4299 lockdep_assert_held(&wq->mutex); 4300 4301 /* link_pwq() can handle duplicate calls */ 4302 link_pwq(pwq); 4303 4304 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4305 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); 4306 return old_pwq; 4307 } 4308 4309 /* context to store the prepared attrs & pwqs before applying */ 4310 struct apply_wqattrs_ctx { 4311 struct workqueue_struct *wq; /* target workqueue */ 4312 struct workqueue_attrs *attrs; /* attrs to apply */ 4313 struct list_head list; /* queued for batching commit */ 4314 struct pool_workqueue *dfl_pwq; 4315 struct pool_workqueue *pwq_tbl[]; 4316 }; 4317 4318 /* free the resources after success or abort */ 4319 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 4320 { 4321 if (ctx) { 4322 int cpu; 4323 4324 for_each_possible_cpu(cpu) 4325 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 4326 put_pwq_unlocked(ctx->dfl_pwq); 4327 4328 free_workqueue_attrs(ctx->attrs); 4329 4330 kfree(ctx); 4331 } 4332 } 4333 4334 /* allocate the attrs and pwqs for later installation */ 4335 static struct apply_wqattrs_ctx * 4336 apply_wqattrs_prepare(struct workqueue_struct *wq, 4337 const struct workqueue_attrs *attrs, 4338 const cpumask_var_t unbound_cpumask) 4339 { 4340 struct apply_wqattrs_ctx *ctx; 4341 struct workqueue_attrs *new_attrs; 4342 int cpu; 4343 4344 lockdep_assert_held(&wq_pool_mutex); 4345 4346 if (WARN_ON(attrs->affn_scope < 0 || 4347 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 4348 return ERR_PTR(-EINVAL); 4349 4350 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 4351 4352 new_attrs = alloc_workqueue_attrs(); 4353 if (!ctx || !new_attrs) 4354 goto out_free; 4355 4356 /* 4357 * If something goes wrong during CPU up/down, we'll fall back to 4358 * the default pwq covering whole @attrs->cpumask. Always create 4359 * it even if we don't use it immediately. 4360 */ 4361 copy_workqueue_attrs(new_attrs, attrs); 4362 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 4363 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4364 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 4365 if (!ctx->dfl_pwq) 4366 goto out_free; 4367 4368 for_each_possible_cpu(cpu) { 4369 if (new_attrs->ordered) { 4370 ctx->dfl_pwq->refcnt++; 4371 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4372 } else { 4373 wq_calc_pod_cpumask(new_attrs, cpu, -1); 4374 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4375 if (!ctx->pwq_tbl[cpu]) 4376 goto out_free; 4377 } 4378 } 4379 4380 /* save the user configured attrs and sanitize it. 
*/ 4381 copy_workqueue_attrs(new_attrs, attrs); 4382 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 4383 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4384 ctx->attrs = new_attrs; 4385 4386 ctx->wq = wq; 4387 return ctx; 4388 4389 out_free: 4390 free_workqueue_attrs(new_attrs); 4391 apply_wqattrs_cleanup(ctx); 4392 return ERR_PTR(-ENOMEM); 4393 } 4394 4395 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 4396 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 4397 { 4398 int cpu; 4399 4400 /* all pwqs have been created successfully, let's install'em */ 4401 mutex_lock(&ctx->wq->mutex); 4402 4403 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 4404 4405 /* save the previous pwq and install the new one */ 4406 for_each_possible_cpu(cpu) 4407 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 4408 ctx->pwq_tbl[cpu]); 4409 4410 /* @dfl_pwq might not have been used, ensure it's linked */ 4411 link_pwq(ctx->dfl_pwq); 4412 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 4413 4414 mutex_unlock(&ctx->wq->mutex); 4415 } 4416 4417 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 4418 const struct workqueue_attrs *attrs) 4419 { 4420 struct apply_wqattrs_ctx *ctx; 4421 4422 /* only unbound workqueues can change attributes */ 4423 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 4424 return -EINVAL; 4425 4426 /* creating multiple pwqs breaks ordering guarantee */ 4427 if (!list_empty(&wq->pwqs)) { 4428 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4429 return -EINVAL; 4430 4431 wq->flags &= ~__WQ_ORDERED; 4432 } 4433 4434 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 4435 if (IS_ERR(ctx)) 4436 return PTR_ERR(ctx); 4437 4438 /* the ctx has been prepared successfully, let's commit it */ 4439 apply_wqattrs_commit(ctx); 4440 apply_wqattrs_cleanup(ctx); 4441 4442 return 0; 4443 } 4444 4445 /** 4446 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 4447 * @wq: the target workqueue 4448 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 4449 * 4450 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 4451 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that 4452 * work items are affine to the pod they were issued on. Older pwqs are released as 4453 * in-flight work items finish. Note that a work item which repeatedly requeues 4454 * itself back-to-back will stay on its current pwq. 4455 * 4456 * Performs GFP_KERNEL allocations. 4457 * 4458 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 4459 * 4460 * Return: 0 on success and -errno on failure. 4461 */ 4462 int apply_workqueue_attrs(struct workqueue_struct *wq, 4463 const struct workqueue_attrs *attrs) 4464 { 4465 int ret; 4466 4467 lockdep_assert_cpus_held(); 4468 4469 mutex_lock(&wq_pool_mutex); 4470 ret = apply_workqueue_attrs_locked(wq, attrs); 4471 mutex_unlock(&wq_pool_mutex); 4472 4473 return ret; 4474 } 4475 4476 /** 4477 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 4478 * @wq: the target workqueue 4479 * @cpu: the CPU to update pool association for 4480 * @hotplug_cpu: the CPU coming up or going down 4481 * @online: whether @cpu is coming up or going down 4482 * 4483 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4484 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 4485 * @wq accordingly.
4486 * 4487 * 4488 * If pod affinity can't be adjusted due to memory allocation failure, it falls 4489 * back to @wq->dfl_pwq which may not be optimal but is always correct. 4490 * 4491 * Note that when the last allowed CPU of a pod goes offline for a workqueue 4492 * with a cpumask spanning multiple pods, the workers which were already 4493 * executing the work items for the workqueue will lose their CPU affinity and 4494 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4495 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4496 * responsibility to flush the work item from CPU_DOWN_PREPARE. 4497 */ 4498 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 4499 int hotplug_cpu, bool online) 4500 { 4501 int off_cpu = online ? -1 : hotplug_cpu; 4502 struct pool_workqueue *old_pwq = NULL, *pwq; 4503 struct workqueue_attrs *target_attrs; 4504 4505 lockdep_assert_held(&wq_pool_mutex); 4506 4507 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 4508 return; 4509 4510 /* 4511 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 4512 * Let's use a preallocated one. The following buf is protected by 4513 * CPU hotplug exclusion. 4514 */ 4515 target_attrs = wq_update_pod_attrs_buf; 4516 4517 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 4518 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 4519 4520 /* nothing to do if the target cpumask matches the current pwq */ 4521 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4522 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), 4523 lockdep_is_held(&wq_pool_mutex)); 4524 if (wqattrs_equal(target_attrs, pwq->pool->attrs)) 4525 return; 4526 4527 /* create a new pwq */ 4528 pwq = alloc_unbound_pwq(wq, target_attrs); 4529 if (!pwq) { 4530 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 4531 wq->name); 4532 goto use_dfl_pwq; 4533 } 4534 4535 /* Install the new pwq. 
*/ 4536 mutex_lock(&wq->mutex); 4537 old_pwq = install_unbound_pwq(wq, cpu, pwq); 4538 goto out_unlock; 4539 4540 use_dfl_pwq: 4541 mutex_lock(&wq->mutex); 4542 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); 4543 get_pwq(wq->dfl_pwq); 4544 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); 4545 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); 4546 out_unlock: 4547 mutex_unlock(&wq->mutex); 4548 put_pwq_unlocked(old_pwq); 4549 } 4550 4551 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 4552 { 4553 bool highpri = wq->flags & WQ_HIGHPRI; 4554 int cpu, ret; 4555 4556 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 4557 if (!wq->cpu_pwq) 4558 goto enomem; 4559 4560 if (!(wq->flags & WQ_UNBOUND)) { 4561 for_each_possible_cpu(cpu) { 4562 struct pool_workqueue **pwq_p = 4563 per_cpu_ptr(wq->cpu_pwq, cpu); 4564 struct worker_pool *pool = 4565 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 4566 4567 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 4568 pool->node); 4569 if (!*pwq_p) 4570 goto enomem; 4571 4572 init_pwq(*pwq_p, wq, pool); 4573 4574 mutex_lock(&wq->mutex); 4575 link_pwq(*pwq_p); 4576 mutex_unlock(&wq->mutex); 4577 } 4578 return 0; 4579 } 4580 4581 cpus_read_lock(); 4582 if (wq->flags & __WQ_ORDERED) { 4583 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 4584 /* there should only be single pwq for ordering guarantee */ 4585 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 4586 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 4587 "ordering guarantee broken for workqueue %s\n", wq->name); 4588 } else { 4589 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4590 } 4591 cpus_read_unlock(); 4592 4593 /* for unbound pwq, flush the pwq_release_worker ensures that the 4594 * pwq_release_workfn() completes before calling kfree(wq). 4595 */ 4596 if (ret) 4597 kthread_flush_worker(pwq_release_worker); 4598 4599 return ret; 4600 4601 enomem: 4602 if (wq->cpu_pwq) { 4603 for_each_possible_cpu(cpu) { 4604 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4605 4606 if (pwq) 4607 kmem_cache_free(pwq_cache, pwq); 4608 } 4609 free_percpu(wq->cpu_pwq); 4610 wq->cpu_pwq = NULL; 4611 } 4612 return -ENOMEM; 4613 } 4614 4615 static int wq_clamp_max_active(int max_active, unsigned int flags, 4616 const char *name) 4617 { 4618 if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 4619 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 4620 max_active, name, 1, WQ_MAX_ACTIVE); 4621 4622 return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 4623 } 4624 4625 /* 4626 * Workqueues which may be used during memory reclaim should have a rescuer 4627 * to guarantee forward progress. 
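 *
 * For illustration only (the workqueue name is made up): callers opt into a
 * rescuer simply by passing WQ_MEM_RECLAIM at allocation time, e.g.
 *
 *	wq = alloc_workqueue("my_writeback_wq", WQ_MEM_RECLAIM, 0);
 *
 * init_rescuer() below then creates the "kworker/R-<name>" thread for it.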
4628 */ 4629 static int init_rescuer(struct workqueue_struct *wq) 4630 { 4631 struct worker *rescuer; 4632 int ret; 4633 4634 if (!(wq->flags & WQ_MEM_RECLAIM)) 4635 return 0; 4636 4637 rescuer = alloc_worker(NUMA_NO_NODE); 4638 if (!rescuer) { 4639 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 4640 wq->name); 4641 return -ENOMEM; 4642 } 4643 4644 rescuer->rescue_wq = wq; 4645 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4646 if (IS_ERR(rescuer->task)) { 4647 ret = PTR_ERR(rescuer->task); 4648 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 4649 wq->name, ERR_PTR(ret)); 4650 kfree(rescuer); 4651 return ret; 4652 } 4653 4654 wq->rescuer = rescuer; 4655 kthread_bind_mask(rescuer->task, cpu_possible_mask); 4656 wake_up_process(rescuer->task); 4657 4658 return 0; 4659 } 4660 4661 __printf(1, 4) 4662 struct workqueue_struct *alloc_workqueue(const char *fmt, 4663 unsigned int flags, 4664 int max_active, ...) 4665 { 4666 va_list args; 4667 struct workqueue_struct *wq; 4668 struct pool_workqueue *pwq; 4669 4670 /* 4671 * Unbound && max_active == 1 used to imply ordered, which is no longer 4672 * the case on many machines due to per-pod pools. While 4673 * alloc_ordered_workqueue() is the right way to create an ordered 4674 * workqueue, keep the previous behavior to avoid subtle breakages. 4675 */ 4676 if ((flags & WQ_UNBOUND) && max_active == 1) 4677 flags |= __WQ_ORDERED; 4678 4679 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4680 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4681 flags |= WQ_UNBOUND; 4682 4683 /* allocate wq and format name */ 4684 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4685 if (!wq) 4686 return NULL; 4687 4688 if (flags & WQ_UNBOUND) { 4689 wq->unbound_attrs = alloc_workqueue_attrs(); 4690 if (!wq->unbound_attrs) 4691 goto err_free_wq; 4692 } 4693 4694 va_start(args, max_active); 4695 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4696 va_end(args); 4697 4698 max_active = max_active ?: WQ_DFL_ACTIVE; 4699 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4700 4701 /* init wq */ 4702 wq->flags = flags; 4703 wq->saved_max_active = max_active; 4704 mutex_init(&wq->mutex); 4705 atomic_set(&wq->nr_pwqs_to_flush, 0); 4706 INIT_LIST_HEAD(&wq->pwqs); 4707 INIT_LIST_HEAD(&wq->flusher_queue); 4708 INIT_LIST_HEAD(&wq->flusher_overflow); 4709 INIT_LIST_HEAD(&wq->maydays); 4710 4711 wq_init_lockdep(wq); 4712 INIT_LIST_HEAD(&wq->list); 4713 4714 if (alloc_and_link_pwqs(wq) < 0) 4715 goto err_unreg_lockdep; 4716 4717 if (wq_online && init_rescuer(wq) < 0) 4718 goto err_destroy; 4719 4720 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4721 goto err_destroy; 4722 4723 /* 4724 * wq_pool_mutex protects global freeze state and workqueues list. 4725 * Grab it, adjust max_active and add the new @wq to workqueues 4726 * list. 
4727 */ 4728 mutex_lock(&wq_pool_mutex); 4729 4730 mutex_lock(&wq->mutex); 4731 for_each_pwq(pwq, wq) 4732 pwq_adjust_max_active(pwq); 4733 mutex_unlock(&wq->mutex); 4734 4735 list_add_tail_rcu(&wq->list, &workqueues); 4736 4737 mutex_unlock(&wq_pool_mutex); 4738 4739 return wq; 4740 4741 err_unreg_lockdep: 4742 wq_unregister_lockdep(wq); 4743 wq_free_lockdep(wq); 4744 err_free_wq: 4745 free_workqueue_attrs(wq->unbound_attrs); 4746 kfree(wq); 4747 return NULL; 4748 err_destroy: 4749 destroy_workqueue(wq); 4750 return NULL; 4751 } 4752 EXPORT_SYMBOL_GPL(alloc_workqueue); 4753 4754 static bool pwq_busy(struct pool_workqueue *pwq) 4755 { 4756 int i; 4757 4758 for (i = 0; i < WORK_NR_COLORS; i++) 4759 if (pwq->nr_in_flight[i]) 4760 return true; 4761 4762 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) 4763 return true; 4764 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) 4765 return true; 4766 4767 return false; 4768 } 4769 4770 /** 4771 * destroy_workqueue - safely terminate a workqueue 4772 * @wq: target workqueue 4773 * 4774 * Safely destroy a workqueue. All work currently pending will be done first. 4775 */ 4776 void destroy_workqueue(struct workqueue_struct *wq) 4777 { 4778 struct pool_workqueue *pwq; 4779 int cpu; 4780 4781 /* 4782 * Remove it from sysfs first so that sanity check failure doesn't 4783 * lead to sysfs name conflicts. 4784 */ 4785 workqueue_sysfs_unregister(wq); 4786 4787 /* mark the workqueue destruction is in progress */ 4788 mutex_lock(&wq->mutex); 4789 wq->flags |= __WQ_DESTROYING; 4790 mutex_unlock(&wq->mutex); 4791 4792 /* drain it before proceeding with destruction */ 4793 drain_workqueue(wq); 4794 4795 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4796 if (wq->rescuer) { 4797 struct worker *rescuer = wq->rescuer; 4798 4799 /* this prevents new queueing */ 4800 raw_spin_lock_irq(&wq_mayday_lock); 4801 wq->rescuer = NULL; 4802 raw_spin_unlock_irq(&wq_mayday_lock); 4803 4804 /* rescuer will empty maydays list before exiting */ 4805 kthread_stop(rescuer->task); 4806 kfree(rescuer); 4807 } 4808 4809 /* 4810 * Sanity checks - grab all the locks so that we wait for all 4811 * in-flight operations which may do put_pwq(). 4812 */ 4813 mutex_lock(&wq_pool_mutex); 4814 mutex_lock(&wq->mutex); 4815 for_each_pwq(pwq, wq) { 4816 raw_spin_lock_irq(&pwq->pool->lock); 4817 if (WARN_ON(pwq_busy(pwq))) { 4818 pr_warn("%s: %s has the following busy pwq\n", 4819 __func__, wq->name); 4820 show_pwq(pwq); 4821 raw_spin_unlock_irq(&pwq->pool->lock); 4822 mutex_unlock(&wq->mutex); 4823 mutex_unlock(&wq_pool_mutex); 4824 show_one_workqueue(wq); 4825 return; 4826 } 4827 raw_spin_unlock_irq(&pwq->pool->lock); 4828 } 4829 mutex_unlock(&wq->mutex); 4830 4831 /* 4832 * wq list is used to freeze wq, remove from list after 4833 * flushing is complete in case freeze races us. 4834 */ 4835 list_del_rcu(&wq->list); 4836 mutex_unlock(&wq_pool_mutex); 4837 4838 /* 4839 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4840 * to put the base refs. @wq will be auto-destroyed from the last 4841 * pwq_put. RCU read lock prevents @wq from going away from under us. 
4842 */ 4843 rcu_read_lock(); 4844 4845 for_each_possible_cpu(cpu) { 4846 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4847 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); 4848 put_pwq_unlocked(pwq); 4849 } 4850 4851 put_pwq_unlocked(wq->dfl_pwq); 4852 wq->dfl_pwq = NULL; 4853 4854 rcu_read_unlock(); 4855 } 4856 EXPORT_SYMBOL_GPL(destroy_workqueue); 4857 4858 /** 4859 * workqueue_set_max_active - adjust max_active of a workqueue 4860 * @wq: target workqueue 4861 * @max_active: new max_active value. 4862 * 4863 * Set max_active of @wq to @max_active. 4864 * 4865 * CONTEXT: 4866 * Don't call from IRQ context. 4867 */ 4868 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4869 { 4870 struct pool_workqueue *pwq; 4871 4872 /* disallow meddling with max_active for ordered workqueues */ 4873 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4874 return; 4875 4876 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4877 4878 mutex_lock(&wq->mutex); 4879 4880 wq->flags &= ~__WQ_ORDERED; 4881 wq->saved_max_active = max_active; 4882 4883 for_each_pwq(pwq, wq) 4884 pwq_adjust_max_active(pwq); 4885 4886 mutex_unlock(&wq->mutex); 4887 } 4888 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4889 4890 /** 4891 * current_work - retrieve %current task's work struct 4892 * 4893 * Determine if %current task is a workqueue worker and what it's working on. 4894 * Useful to find out the context that the %current task is running in. 4895 * 4896 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 4897 */ 4898 struct work_struct *current_work(void) 4899 { 4900 struct worker *worker = current_wq_worker(); 4901 4902 return worker ? worker->current_work : NULL; 4903 } 4904 EXPORT_SYMBOL(current_work); 4905 4906 /** 4907 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4908 * 4909 * Determine whether %current is a workqueue rescuer. Can be used from 4910 * work functions to determine whether it's being run off the rescuer task. 4911 * 4912 * Return: %true if %current is a workqueue rescuer. %false otherwise. 4913 */ 4914 bool current_is_workqueue_rescuer(void) 4915 { 4916 struct worker *worker = current_wq_worker(); 4917 4918 return worker && worker->rescue_wq; 4919 } 4920 4921 /** 4922 * workqueue_congested - test whether a workqueue is congested 4923 * @cpu: CPU in question 4924 * @wq: target workqueue 4925 * 4926 * Test whether @wq's cpu workqueue for @cpu is congested. There is 4927 * no synchronization around this function and the test result is 4928 * unreliable and only useful as advisory hints or for debugging. 4929 * 4930 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4931 * 4932 * With the exception of ordered workqueues, all workqueues have per-cpu 4933 * pool_workqueues, each with its own congested state. A workqueue being 4934 * congested on one CPU doesn't mean that the workqueue is congested on any 4935 * other CPUs. 4936 * 4937 * Return: 4938 * %true if congested, %false otherwise.
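 *
 * For illustration only (my_wq, my_work and drop_or_defer() are
 * hypothetical): a producer may use the result purely as a hint to shed
 * load:
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &my_work);
 *	else
 *		drop_or_defer();
 *
 * As noted above, the result is advisory only and may already be stale by
 * the time it is acted upon.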
4939 */ 4940 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4941 { 4942 struct pool_workqueue *pwq; 4943 bool ret; 4944 4945 rcu_read_lock(); 4946 preempt_disable(); 4947 4948 if (cpu == WORK_CPU_UNBOUND) 4949 cpu = smp_processor_id(); 4950 4951 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4952 ret = !list_empty(&pwq->inactive_works); 4953 4954 preempt_enable(); 4955 rcu_read_unlock(); 4956 4957 return ret; 4958 } 4959 EXPORT_SYMBOL_GPL(workqueue_congested); 4960 4961 /** 4962 * work_busy - test whether a work is currently pending or running 4963 * @work: the work to be tested 4964 * 4965 * Test whether @work is currently pending or running. There is no 4966 * synchronization around this function and the test result is 4967 * unreliable and only useful as advisory hints or for debugging. 4968 * 4969 * Return: 4970 * OR'd bitmask of WORK_BUSY_* bits. 4971 */ 4972 unsigned int work_busy(struct work_struct *work) 4973 { 4974 struct worker_pool *pool; 4975 unsigned long flags; 4976 unsigned int ret = 0; 4977 4978 if (work_pending(work)) 4979 ret |= WORK_BUSY_PENDING; 4980 4981 rcu_read_lock(); 4982 pool = get_work_pool(work); 4983 if (pool) { 4984 raw_spin_lock_irqsave(&pool->lock, flags); 4985 if (find_worker_executing_work(pool, work)) 4986 ret |= WORK_BUSY_RUNNING; 4987 raw_spin_unlock_irqrestore(&pool->lock, flags); 4988 } 4989 rcu_read_unlock(); 4990 4991 return ret; 4992 } 4993 EXPORT_SYMBOL_GPL(work_busy); 4994 4995 /** 4996 * set_worker_desc - set description for the current work item 4997 * @fmt: printf-style format string 4998 * @...: arguments for the format string 4999 * 5000 * This function can be called by a running work function to describe what 5001 * the work item is about. If the worker task gets dumped, this 5002 * information will be printed out together to help debugging. The 5003 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 5004 */ 5005 void set_worker_desc(const char *fmt, ...) 5006 { 5007 struct worker *worker = current_wq_worker(); 5008 va_list args; 5009 5010 if (worker) { 5011 va_start(args, fmt); 5012 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5013 va_end(args); 5014 } 5015 } 5016 EXPORT_SYMBOL_GPL(set_worker_desc); 5017 5018 /** 5019 * print_worker_info - print out worker information and description 5020 * @log_lvl: the log level to use when printing 5021 * @task: target task 5022 * 5023 * If @task is a worker and currently executing a work item, print out the 5024 * name of the workqueue being serviced and worker description set with 5025 * set_worker_desc() by the currently executing work item. 5026 * 5027 * This function can be safely called on any task as long as the 5028 * task_struct itself is accessible. While safe, this function isn't 5029 * synchronized and may print out mixups or garbages of limited length. 5030 */ 5031 void print_worker_info(const char *log_lvl, struct task_struct *task) 5032 { 5033 work_func_t *fn = NULL; 5034 char name[WQ_NAME_LEN] = { }; 5035 char desc[WORKER_DESC_LEN] = { }; 5036 struct pool_workqueue *pwq = NULL; 5037 struct workqueue_struct *wq = NULL; 5038 struct worker *worker; 5039 5040 if (!(task->flags & PF_WQ_WORKER)) 5041 return; 5042 5043 /* 5044 * This function is called without any synchronization and @task 5045 * could be in any state. Be careful with dereferences. 5046 */ 5047 worker = kthread_probe_data(task); 5048 5049 /* 5050 * Carefully copy the associated workqueue's workfn, name and desc. 5051 * Keep the original last '\0' in case the original is garbage. 
5052 */ 5053 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5054 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5055 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5056 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5057 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5058 5059 if (fn || name[0] || desc[0]) { 5060 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5061 if (strcmp(name, desc)) 5062 pr_cont(" (%s)", desc); 5063 pr_cont("\n"); 5064 } 5065 } 5066 5067 static void pr_cont_pool_info(struct worker_pool *pool) 5068 { 5069 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5070 if (pool->node != NUMA_NO_NODE) 5071 pr_cont(" node=%d", pool->node); 5072 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 5073 } 5074 5075 struct pr_cont_work_struct { 5076 bool comma; 5077 work_func_t func; 5078 long ctr; 5079 }; 5080 5081 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5082 { 5083 if (!pcwsp->ctr) 5084 goto out_record; 5085 if (func == pcwsp->func) { 5086 pcwsp->ctr++; 5087 return; 5088 } 5089 if (pcwsp->ctr == 1) 5090 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5091 else 5092 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5093 pcwsp->ctr = 0; 5094 out_record: 5095 if ((long)func == -1L) 5096 return; 5097 pcwsp->comma = comma; 5098 pcwsp->func = func; 5099 pcwsp->ctr = 1; 5100 } 5101 5102 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 5103 { 5104 if (work->func == wq_barrier_func) { 5105 struct wq_barrier *barr; 5106 5107 barr = container_of(work, struct wq_barrier, work); 5108 5109 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5110 pr_cont("%s BAR(%d)", comma ? "," : "", 5111 task_pid_nr(barr->task)); 5112 } else { 5113 if (!comma) 5114 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5115 pr_cont_work_flush(comma, work->func, pcwsp); 5116 } 5117 } 5118 5119 static void show_pwq(struct pool_workqueue *pwq) 5120 { 5121 struct pr_cont_work_struct pcws = { .ctr = 0, }; 5122 struct worker_pool *pool = pwq->pool; 5123 struct work_struct *work; 5124 struct worker *worker; 5125 bool has_in_flight = false, has_pending = false; 5126 int bkt; 5127 5128 pr_info(" pwq %d:", pool->id); 5129 pr_cont_pool_info(pool); 5130 5131 pr_cont(" active=%d/%d refcnt=%d%s\n", 5132 pwq->nr_active, pwq->max_active, pwq->refcnt, 5133 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 5134 5135 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5136 if (worker->current_pwq == pwq) { 5137 has_in_flight = true; 5138 break; 5139 } 5140 } 5141 if (has_in_flight) { 5142 bool comma = false; 5143 5144 pr_info(" in-flight:"); 5145 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5146 if (worker->current_pwq != pwq) 5147 continue; 5148 5149 pr_cont("%s %d%s:%ps", comma ? "," : "", 5150 task_pid_nr(worker->task), 5151 worker->rescue_wq ? 
"(RESCUER)" : "", 5152 worker->current_func); 5153 list_for_each_entry(work, &worker->scheduled, entry) 5154 pr_cont_work(false, work, &pcws); 5155 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5156 comma = true; 5157 } 5158 pr_cont("\n"); 5159 } 5160 5161 list_for_each_entry(work, &pool->worklist, entry) { 5162 if (get_work_pwq(work) == pwq) { 5163 has_pending = true; 5164 break; 5165 } 5166 } 5167 if (has_pending) { 5168 bool comma = false; 5169 5170 pr_info(" pending:"); 5171 list_for_each_entry(work, &pool->worklist, entry) { 5172 if (get_work_pwq(work) != pwq) 5173 continue; 5174 5175 pr_cont_work(comma, work, &pcws); 5176 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5177 } 5178 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5179 pr_cont("\n"); 5180 } 5181 5182 if (!list_empty(&pwq->inactive_works)) { 5183 bool comma = false; 5184 5185 pr_info(" inactive:"); 5186 list_for_each_entry(work, &pwq->inactive_works, entry) { 5187 pr_cont_work(comma, work, &pcws); 5188 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5189 } 5190 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5191 pr_cont("\n"); 5192 } 5193 } 5194 5195 /** 5196 * show_one_workqueue - dump state of specified workqueue 5197 * @wq: workqueue whose state will be printed 5198 */ 5199 void show_one_workqueue(struct workqueue_struct *wq) 5200 { 5201 struct pool_workqueue *pwq; 5202 bool idle = true; 5203 unsigned long flags; 5204 5205 for_each_pwq(pwq, wq) { 5206 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5207 idle = false; 5208 break; 5209 } 5210 } 5211 if (idle) /* Nothing to print for idle workqueue */ 5212 return; 5213 5214 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 5215 5216 for_each_pwq(pwq, wq) { 5217 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5218 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5219 /* 5220 * Defer printing to avoid deadlocks in console 5221 * drivers that queue work while holding locks 5222 * also taken in their write paths. 5223 */ 5224 printk_deferred_enter(); 5225 show_pwq(pwq); 5226 printk_deferred_exit(); 5227 } 5228 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 5229 /* 5230 * We could be printing a lot from atomic context, e.g. 5231 * sysrq-t -> show_all_workqueues(). Avoid triggering 5232 * hard lockup. 5233 */ 5234 touch_nmi_watchdog(); 5235 } 5236 5237 } 5238 5239 /** 5240 * show_one_worker_pool - dump state of specified worker pool 5241 * @pool: worker pool whose state will be printed 5242 */ 5243 static void show_one_worker_pool(struct worker_pool *pool) 5244 { 5245 struct worker *worker; 5246 bool first = true; 5247 unsigned long flags; 5248 unsigned long hung = 0; 5249 5250 raw_spin_lock_irqsave(&pool->lock, flags); 5251 if (pool->nr_workers == pool->nr_idle) 5252 goto next_pool; 5253 5254 /* How long the first pending work is waiting for a worker. */ 5255 if (!list_empty(&pool->worklist)) 5256 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5257 5258 /* 5259 * Defer printing to avoid deadlocks in console drivers that 5260 * queue work while holding locks also taken in their write 5261 * paths. 5262 */ 5263 printk_deferred_enter(); 5264 pr_info("pool %d:", pool->id); 5265 pr_cont_pool_info(pool); 5266 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 5267 if (pool->manager) 5268 pr_cont(" manager: %d", 5269 task_pid_nr(pool->manager->task)); 5270 list_for_each_entry(worker, &pool->idle_list, entry) { 5271 pr_cont(" %s%d", first ? 
"idle: " : "", 5272 task_pid_nr(worker->task)); 5273 first = false; 5274 } 5275 pr_cont("\n"); 5276 printk_deferred_exit(); 5277 next_pool: 5278 raw_spin_unlock_irqrestore(&pool->lock, flags); 5279 /* 5280 * We could be printing a lot from atomic context, e.g. 5281 * sysrq-t -> show_all_workqueues(). Avoid triggering 5282 * hard lockup. 5283 */ 5284 touch_nmi_watchdog(); 5285 5286 } 5287 5288 /** 5289 * show_all_workqueues - dump workqueue state 5290 * 5291 * Called from a sysrq handler and prints out all busy workqueues and pools. 5292 */ 5293 void show_all_workqueues(void) 5294 { 5295 struct workqueue_struct *wq; 5296 struct worker_pool *pool; 5297 int pi; 5298 5299 rcu_read_lock(); 5300 5301 pr_info("Showing busy workqueues and worker pools:\n"); 5302 5303 list_for_each_entry_rcu(wq, &workqueues, list) 5304 show_one_workqueue(wq); 5305 5306 for_each_pool(pool, pi) 5307 show_one_worker_pool(pool); 5308 5309 rcu_read_unlock(); 5310 } 5311 5312 /** 5313 * show_freezable_workqueues - dump freezable workqueue state 5314 * 5315 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5316 * still busy. 5317 */ 5318 void show_freezable_workqueues(void) 5319 { 5320 struct workqueue_struct *wq; 5321 5322 rcu_read_lock(); 5323 5324 pr_info("Showing freezable workqueues that are still busy:\n"); 5325 5326 list_for_each_entry_rcu(wq, &workqueues, list) { 5327 if (!(wq->flags & WQ_FREEZABLE)) 5328 continue; 5329 show_one_workqueue(wq); 5330 } 5331 5332 rcu_read_unlock(); 5333 } 5334 5335 /* used to show worker information through /proc/PID/{comm,stat,status} */ 5336 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 5337 { 5338 int off; 5339 5340 /* always show the actual comm */ 5341 off = strscpy(buf, task->comm, size); 5342 if (off < 0) 5343 return; 5344 5345 /* stabilize PF_WQ_WORKER and worker pool association */ 5346 mutex_lock(&wq_pool_attach_mutex); 5347 5348 if (task->flags & PF_WQ_WORKER) { 5349 struct worker *worker = kthread_data(task); 5350 struct worker_pool *pool = worker->pool; 5351 5352 if (pool) { 5353 raw_spin_lock_irq(&pool->lock); 5354 /* 5355 * ->desc tracks information (wq name or 5356 * set_worker_desc()) for the latest execution. If 5357 * current, prepend '+', otherwise '-'. 5358 */ 5359 if (worker->desc[0] != '\0') { 5360 if (worker->current_work) 5361 scnprintf(buf + off, size - off, "+%s", 5362 worker->desc); 5363 else 5364 scnprintf(buf + off, size - off, "-%s", 5365 worker->desc); 5366 } 5367 raw_spin_unlock_irq(&pool->lock); 5368 } 5369 } 5370 5371 mutex_unlock(&wq_pool_attach_mutex); 5372 } 5373 5374 #ifdef CONFIG_SMP 5375 5376 /* 5377 * CPU hotplug. 5378 * 5379 * There are two challenges in supporting CPU hotplug. Firstly, there 5380 * are a lot of assumptions on strong associations among work, pwq and 5381 * pool which make migrating pending and scheduled works very 5382 * difficult to implement without impacting hot paths. Secondly, 5383 * worker pools serve mix of short, long and very long running works making 5384 * blocked draining impractical. 5385 * 5386 * This is solved by allowing the pools to be disassociated from the CPU 5387 * running as an unbound one and allowing it to be reattached later if the 5388 * cpu comes back online. 
5389 */ 5390 5391 static void unbind_workers(int cpu) 5392 { 5393 struct worker_pool *pool; 5394 struct worker *worker; 5395 5396 for_each_cpu_worker_pool(pool, cpu) { 5397 mutex_lock(&wq_pool_attach_mutex); 5398 raw_spin_lock_irq(&pool->lock); 5399 5400 /* 5401 * We've blocked all attach/detach operations. Make all workers 5402 * unbound and set DISASSOCIATED. Before this, all workers 5403 * must be on the cpu. After this, they may become diasporas. 5404 * And the preemption disabled section in their sched callbacks 5405 * is guaranteed to see WORKER_UNBOUND since the code here 5406 * is on the same cpu. 5407 */ 5408 for_each_pool_worker(worker, pool) 5409 worker->flags |= WORKER_UNBOUND; 5410 5411 pool->flags |= POOL_DISASSOCIATED; 5412 5413 /* 5414 * The handling of nr_running in sched callbacks is disabled 5415 * now. Zap nr_running. After this, nr_running stays zero and 5416 * need_more_worker() and keep_working() are always true as 5417 * long as the worklist is not empty. This pool now behaves as 5418 * an unbound (in terms of concurrency management) pool which 5419 * is served by workers tied to the pool. 5420 */ 5421 pool->nr_running = 0; 5422 5423 /* 5424 * With concurrency management just turned off, a busy 5425 * worker blocking could lead to lengthy stalls. Kick off 5426 * unbound chain execution of currently pending work items. 5427 */ 5428 kick_pool(pool); 5429 5430 raw_spin_unlock_irq(&pool->lock); 5431 5432 for_each_pool_worker(worker, pool) 5433 unbind_worker(worker); 5434 5435 mutex_unlock(&wq_pool_attach_mutex); 5436 } 5437 } 5438 5439 /** 5440 * rebind_workers - rebind all workers of a pool to the associated CPU 5441 * @pool: pool of interest 5442 * 5443 * @pool->cpu is coming online. Rebind all workers to the CPU. 5444 */ 5445 static void rebind_workers(struct worker_pool *pool) 5446 { 5447 struct worker *worker; 5448 5449 lockdep_assert_held(&wq_pool_attach_mutex); 5450 5451 /* 5452 * Restore CPU affinity of all workers. As all idle workers should 5453 * be on the run-queue of the associated CPU before any local 5454 * wake-ups for concurrency management happen, restore CPU affinity 5455 * of all workers first and then clear UNBOUND. As we're called 5456 * from CPU_ONLINE, the following shouldn't fail. 5457 */ 5458 for_each_pool_worker(worker, pool) { 5459 kthread_set_per_cpu(worker->task, pool->cpu); 5460 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 5461 pool_allowed_cpus(pool)) < 0); 5462 } 5463 5464 raw_spin_lock_irq(&pool->lock); 5465 5466 pool->flags &= ~POOL_DISASSOCIATED; 5467 5468 for_each_pool_worker(worker, pool) { 5469 unsigned int worker_flags = worker->flags; 5470 5471 /* 5472 * We want to clear UNBOUND but can't directly call 5473 * worker_clr_flags() or adjust nr_running. Atomically 5474 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5475 * @worker will clear REBOUND using worker_clr_flags() when 5476 * it initiates the next execution cycle thus restoring 5477 * concurrency management. Note that when or whether 5478 * @worker clears REBOUND doesn't affect correctness. 5479 * 5480 * WRITE_ONCE() is necessary because @worker->flags may be 5481 * tested without holding any lock in 5482 * wq_worker_running(). Without it, NOT_RUNNING test may 5483 * fail incorrectly leading to premature concurrency 5484 * management operations.
5485 */ 5486 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5487 worker_flags |= WORKER_REBOUND; 5488 worker_flags &= ~WORKER_UNBOUND; 5489 WRITE_ONCE(worker->flags, worker_flags); 5490 } 5491 5492 raw_spin_unlock_irq(&pool->lock); 5493 } 5494 5495 /** 5496 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 5497 * @pool: unbound pool of interest 5498 * @cpu: the CPU which is coming up 5499 * 5500 * An unbound pool may end up with a cpumask which doesn't have any online 5501 * CPUs. When a worker of such pool get scheduled, the scheduler resets 5502 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 5503 * online CPU before, cpus_allowed of all its workers should be restored. 5504 */ 5505 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 5506 { 5507 static cpumask_t cpumask; 5508 struct worker *worker; 5509 5510 lockdep_assert_held(&wq_pool_attach_mutex); 5511 5512 /* is @cpu allowed for @pool? */ 5513 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 5514 return; 5515 5516 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 5517 5518 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5519 for_each_pool_worker(worker, pool) 5520 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 5521 } 5522 5523 int workqueue_prepare_cpu(unsigned int cpu) 5524 { 5525 struct worker_pool *pool; 5526 5527 for_each_cpu_worker_pool(pool, cpu) { 5528 if (pool->nr_workers) 5529 continue; 5530 if (!create_worker(pool)) 5531 return -ENOMEM; 5532 } 5533 return 0; 5534 } 5535 5536 int workqueue_online_cpu(unsigned int cpu) 5537 { 5538 struct worker_pool *pool; 5539 struct workqueue_struct *wq; 5540 int pi; 5541 5542 mutex_lock(&wq_pool_mutex); 5543 5544 for_each_pool(pool, pi) { 5545 mutex_lock(&wq_pool_attach_mutex); 5546 5547 if (pool->cpu == cpu) 5548 rebind_workers(pool); 5549 else if (pool->cpu < 0) 5550 restore_unbound_workers_cpumask(pool, cpu); 5551 5552 mutex_unlock(&wq_pool_attach_mutex); 5553 } 5554 5555 /* update pod affinity of unbound workqueues */ 5556 list_for_each_entry(wq, &workqueues, list) { 5557 struct workqueue_attrs *attrs = wq->unbound_attrs; 5558 5559 if (attrs) { 5560 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5561 int tcpu; 5562 5563 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5564 wq_update_pod(wq, tcpu, cpu, true); 5565 } 5566 } 5567 5568 mutex_unlock(&wq_pool_mutex); 5569 return 0; 5570 } 5571 5572 int workqueue_offline_cpu(unsigned int cpu) 5573 { 5574 struct workqueue_struct *wq; 5575 5576 /* unbinding per-cpu workers should happen on the local CPU */ 5577 if (WARN_ON(cpu != smp_processor_id())) 5578 return -1; 5579 5580 unbind_workers(cpu); 5581 5582 /* update pod affinity of unbound workqueues */ 5583 mutex_lock(&wq_pool_mutex); 5584 list_for_each_entry(wq, &workqueues, list) { 5585 struct workqueue_attrs *attrs = wq->unbound_attrs; 5586 5587 if (attrs) { 5588 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5589 int tcpu; 5590 5591 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5592 wq_update_pod(wq, tcpu, cpu, false); 5593 } 5594 } 5595 mutex_unlock(&wq_pool_mutex); 5596 5597 return 0; 5598 } 5599 5600 struct work_for_cpu { 5601 struct work_struct work; 5602 long (*fn)(void *); 5603 void *arg; 5604 long ret; 5605 }; 5606 5607 static void work_for_cpu_fn(struct work_struct *work) 5608 { 5609 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5610 5611 wfc->ret = wfc->fn(wfc->arg); 5612 } 5613 5614 /** 5615 * 
work_on_cpu_key - run a function in thread context on a particular cpu 5616 * @cpu: the cpu to run on 5617 * @fn: the function to run 5618 * @arg: the function arg 5619 * @key: The lock class key for lock debugging purposes 5620 * 5621 * It is up to the caller to ensure that the cpu doesn't go offline. 5622 * The caller must not hold any locks which would prevent @fn from completing. 5623 * 5624 * Return: The value @fn returns. 5625 */ 5626 long work_on_cpu_key(int cpu, long (*fn)(void *), 5627 void *arg, struct lock_class_key *key) 5628 { 5629 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5630 5631 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 5632 schedule_work_on(cpu, &wfc.work); 5633 flush_work(&wfc.work); 5634 destroy_work_on_stack(&wfc.work); 5635 return wfc.ret; 5636 } 5637 EXPORT_SYMBOL_GPL(work_on_cpu_key); 5638 5639 /** 5640 * work_on_cpu_safe_key - run a function in thread context on a particular cpu 5641 * @cpu: the cpu to run on 5642 * @fn: the function to run 5643 * @arg: the function argument 5644 * @key: The lock class key for lock debugging purposes 5645 * 5646 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 5647 * any locks which would prevent @fn from completing. 5648 * 5649 * Return: The value @fn returns. 5650 */ 5651 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 5652 void *arg, struct lock_class_key *key) 5653 { 5654 long ret = -ENODEV; 5655 5656 cpus_read_lock(); 5657 if (cpu_online(cpu)) 5658 ret = work_on_cpu_key(cpu, fn, arg, key); 5659 cpus_read_unlock(); 5660 return ret; 5661 } 5662 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 5663 #endif /* CONFIG_SMP */ 5664 5665 #ifdef CONFIG_FREEZER 5666 5667 /** 5668 * freeze_workqueues_begin - begin freezing workqueues 5669 * 5670 * Start freezing workqueues. After this function returns, all freezable 5671 * workqueues will queue new works to their inactive_works list instead of 5672 * pool->worklist. 5673 * 5674 * CONTEXT: 5675 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5676 */ 5677 void freeze_workqueues_begin(void) 5678 { 5679 struct workqueue_struct *wq; 5680 struct pool_workqueue *pwq; 5681 5682 mutex_lock(&wq_pool_mutex); 5683 5684 WARN_ON_ONCE(workqueue_freezing); 5685 workqueue_freezing = true; 5686 5687 list_for_each_entry(wq, &workqueues, list) { 5688 mutex_lock(&wq->mutex); 5689 for_each_pwq(pwq, wq) 5690 pwq_adjust_max_active(pwq); 5691 mutex_unlock(&wq->mutex); 5692 } 5693 5694 mutex_unlock(&wq_pool_mutex); 5695 } 5696 5697 /** 5698 * freeze_workqueues_busy - are freezable workqueues still busy? 5699 * 5700 * Check whether freezing is complete. This function must be called 5701 * between freeze_workqueues_begin() and thaw_workqueues(). 5702 * 5703 * CONTEXT: 5704 * Grabs and releases wq_pool_mutex. 5705 * 5706 * Return: 5707 * %true if some freezable workqueues are still busy. %false if freezing 5708 * is complete. 5709 */ 5710 bool freeze_workqueues_busy(void) 5711 { 5712 bool busy = false; 5713 struct workqueue_struct *wq; 5714 struct pool_workqueue *pwq; 5715 5716 mutex_lock(&wq_pool_mutex); 5717 5718 WARN_ON_ONCE(!workqueue_freezing); 5719 5720 list_for_each_entry(wq, &workqueues, list) { 5721 if (!(wq->flags & WQ_FREEZABLE)) 5722 continue; 5723 /* 5724 * nr_active is monotonically decreasing. It's safe 5725 * to peek without lock. 
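 * freeze_workqueues_begin() has already driven max_active of every
 * freezable pwq down to zero via pwq_adjust_max_active(), so no
 * inactive work item can become active anymore; nr_active can only
 * drop as in-flight items finish, and a stale read can only err
 * towards reporting "still busy".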
5726 */ 5727 rcu_read_lock(); 5728 for_each_pwq(pwq, wq) { 5729 WARN_ON_ONCE(pwq->nr_active < 0); 5730 if (pwq->nr_active) { 5731 busy = true; 5732 rcu_read_unlock(); 5733 goto out_unlock; 5734 } 5735 } 5736 rcu_read_unlock(); 5737 } 5738 out_unlock: 5739 mutex_unlock(&wq_pool_mutex); 5740 return busy; 5741 } 5742 5743 /** 5744 * thaw_workqueues - thaw workqueues 5745 * 5746 * Thaw workqueues. Normal queueing is restored and all collected 5747 * frozen works are transferred to their respective pool worklists. 5748 * 5749 * CONTEXT: 5750 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5751 */ 5752 void thaw_workqueues(void) 5753 { 5754 struct workqueue_struct *wq; 5755 struct pool_workqueue *pwq; 5756 5757 mutex_lock(&wq_pool_mutex); 5758 5759 if (!workqueue_freezing) 5760 goto out_unlock; 5761 5762 workqueue_freezing = false; 5763 5764 /* restore max_active and repopulate worklist */ 5765 list_for_each_entry(wq, &workqueues, list) { 5766 mutex_lock(&wq->mutex); 5767 for_each_pwq(pwq, wq) 5768 pwq_adjust_max_active(pwq); 5769 mutex_unlock(&wq->mutex); 5770 } 5771 5772 out_unlock: 5773 mutex_unlock(&wq_pool_mutex); 5774 } 5775 #endif /* CONFIG_FREEZER */ 5776 5777 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5778 { 5779 LIST_HEAD(ctxs); 5780 int ret = 0; 5781 struct workqueue_struct *wq; 5782 struct apply_wqattrs_ctx *ctx, *n; 5783 5784 lockdep_assert_held(&wq_pool_mutex); 5785 5786 list_for_each_entry(wq, &workqueues, list) { 5787 if (!(wq->flags & WQ_UNBOUND)) 5788 continue; 5789 5790 /* creating multiple pwqs breaks ordering guarantee */ 5791 if (!list_empty(&wq->pwqs)) { 5792 if (wq->flags & __WQ_ORDERED_EXPLICIT) 5793 continue; 5794 wq->flags &= ~__WQ_ORDERED; 5795 } 5796 5797 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 5798 if (IS_ERR(ctx)) { 5799 ret = PTR_ERR(ctx); 5800 break; 5801 } 5802 5803 list_add_tail(&ctx->list, &ctxs); 5804 } 5805 5806 list_for_each_entry_safe(ctx, n, &ctxs, list) { 5807 if (!ret) 5808 apply_wqattrs_commit(ctx); 5809 apply_wqattrs_cleanup(ctx); 5810 } 5811 5812 if (!ret) { 5813 mutex_lock(&wq_pool_attach_mutex); 5814 cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 5815 mutex_unlock(&wq_pool_attach_mutex); 5816 } 5817 return ret; 5818 } 5819 5820 /** 5821 * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask 5822 * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask 5823 * 5824 * This function can be called from cpuset code to provide a set of isolated 5825 * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold 5826 * either cpus_read_lock or cpus_write_lock. 5827 */ 5828 int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask) 5829 { 5830 cpumask_var_t cpumask; 5831 int ret = 0; 5832 5833 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 5834 return -ENOMEM; 5835 5836 lockdep_assert_cpus_held(); 5837 mutex_lock(&wq_pool_mutex); 5838 5839 /* Save the current isolated cpumask & export it via sysfs */ 5840 cpumask_copy(wq_isolated_cpumask, exclude_cpumask); 5841 5842 /* 5843 * If the operation fails, it will fall back to 5844 * wq_requested_unbound_cpumask which is initially set to 5845 * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) house keeping mask and rewritten 5846 * by any subsequent write to workqueue/cpumask sysfs file. 
5847 */ 5848 if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask)) 5849 cpumask_copy(cpumask, wq_requested_unbound_cpumask); 5850 if (!cpumask_equal(cpumask, wq_unbound_cpumask)) 5851 ret = workqueue_apply_unbound_cpumask(cpumask); 5852 5853 mutex_unlock(&wq_pool_mutex); 5854 free_cpumask_var(cpumask); 5855 return ret; 5856 } 5857 5858 static int parse_affn_scope(const char *val) 5859 { 5860 int i; 5861 5862 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 5863 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 5864 return i; 5865 } 5866 return -EINVAL; 5867 } 5868 5869 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 5870 { 5871 struct workqueue_struct *wq; 5872 int affn, cpu; 5873 5874 affn = parse_affn_scope(val); 5875 if (affn < 0) 5876 return affn; 5877 if (affn == WQ_AFFN_DFL) 5878 return -EINVAL; 5879 5880 cpus_read_lock(); 5881 mutex_lock(&wq_pool_mutex); 5882 5883 wq_affn_dfl = affn; 5884 5885 list_for_each_entry(wq, &workqueues, list) { 5886 for_each_online_cpu(cpu) { 5887 wq_update_pod(wq, cpu, cpu, true); 5888 } 5889 } 5890 5891 mutex_unlock(&wq_pool_mutex); 5892 cpus_read_unlock(); 5893 5894 return 0; 5895 } 5896 5897 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 5898 { 5899 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 5900 } 5901 5902 static const struct kernel_param_ops wq_affn_dfl_ops = { 5903 .set = wq_affn_dfl_set, 5904 .get = wq_affn_dfl_get, 5905 }; 5906 5907 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 5908 5909 #ifdef CONFIG_SYSFS 5910 /* 5911 * Workqueues with WQ_SYSFS flag set is visible to userland via 5912 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 5913 * following attributes. 5914 * 5915 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 5916 * max_active RW int : maximum number of in-flight work items 5917 * 5918 * Unbound workqueues have the following extra attributes. 
5919 * 5920 * nice RW int : nice value of the workers 5921 * cpumask RW mask : bitmask of allowed CPUs for the workers 5922 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 5923 * affinity_strict RW bool : worker CPU affinity is strict 5924 */ 5925 struct wq_device { 5926 struct workqueue_struct *wq; 5927 struct device dev; 5928 }; 5929 5930 static struct workqueue_struct *dev_to_wq(struct device *dev) 5931 { 5932 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 5933 5934 return wq_dev->wq; 5935 } 5936 5937 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 5938 char *buf) 5939 { 5940 struct workqueue_struct *wq = dev_to_wq(dev); 5941 5942 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 5943 } 5944 static DEVICE_ATTR_RO(per_cpu); 5945 5946 static ssize_t max_active_show(struct device *dev, 5947 struct device_attribute *attr, char *buf) 5948 { 5949 struct workqueue_struct *wq = dev_to_wq(dev); 5950 5951 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 5952 } 5953 5954 static ssize_t max_active_store(struct device *dev, 5955 struct device_attribute *attr, const char *buf, 5956 size_t count) 5957 { 5958 struct workqueue_struct *wq = dev_to_wq(dev); 5959 int val; 5960 5961 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 5962 return -EINVAL; 5963 5964 workqueue_set_max_active(wq, val); 5965 return count; 5966 } 5967 static DEVICE_ATTR_RW(max_active); 5968 5969 static struct attribute *wq_sysfs_attrs[] = { 5970 &dev_attr_per_cpu.attr, 5971 &dev_attr_max_active.attr, 5972 NULL, 5973 }; 5974 ATTRIBUTE_GROUPS(wq_sysfs); 5975 5976 static void apply_wqattrs_lock(void) 5977 { 5978 /* CPUs should stay stable across pwq creations and installations */ 5979 cpus_read_lock(); 5980 mutex_lock(&wq_pool_mutex); 5981 } 5982 5983 static void apply_wqattrs_unlock(void) 5984 { 5985 mutex_unlock(&wq_pool_mutex); 5986 cpus_read_unlock(); 5987 } 5988 5989 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 5990 char *buf) 5991 { 5992 struct workqueue_struct *wq = dev_to_wq(dev); 5993 int written; 5994 5995 mutex_lock(&wq->mutex); 5996 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 5997 mutex_unlock(&wq->mutex); 5998 5999 return written; 6000 } 6001 6002 /* prepare workqueue_attrs for sysfs store operations */ 6003 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 6004 { 6005 struct workqueue_attrs *attrs; 6006 6007 lockdep_assert_held(&wq_pool_mutex); 6008 6009 attrs = alloc_workqueue_attrs(); 6010 if (!attrs) 6011 return NULL; 6012 6013 copy_workqueue_attrs(attrs, wq->unbound_attrs); 6014 return attrs; 6015 } 6016 6017 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 6018 const char *buf, size_t count) 6019 { 6020 struct workqueue_struct *wq = dev_to_wq(dev); 6021 struct workqueue_attrs *attrs; 6022 int ret = -ENOMEM; 6023 6024 apply_wqattrs_lock(); 6025 6026 attrs = wq_sysfs_prep_attrs(wq); 6027 if (!attrs) 6028 goto out_unlock; 6029 6030 if (sscanf(buf, "%d", &attrs->nice) == 1 && 6031 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6032 ret = apply_workqueue_attrs_locked(wq, attrs); 6033 else 6034 ret = -EINVAL; 6035 6036 out_unlock: 6037 apply_wqattrs_unlock(); 6038 free_workqueue_attrs(attrs); 6039 return ret ?: count; 6040 } 6041 6042 static ssize_t wq_cpumask_show(struct device *dev, 6043 struct device_attribute *attr, char *buf) 6044 { 6045 struct workqueue_struct *wq = dev_to_wq(dev); 
6046 int written; 6047 6048 mutex_lock(&wq->mutex); 6049 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6050 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6051 mutex_unlock(&wq->mutex); 6052 return written; 6053 } 6054 6055 static ssize_t wq_cpumask_store(struct device *dev, 6056 struct device_attribute *attr, 6057 const char *buf, size_t count) 6058 { 6059 struct workqueue_struct *wq = dev_to_wq(dev); 6060 struct workqueue_attrs *attrs; 6061 int ret = -ENOMEM; 6062 6063 apply_wqattrs_lock(); 6064 6065 attrs = wq_sysfs_prep_attrs(wq); 6066 if (!attrs) 6067 goto out_unlock; 6068 6069 ret = cpumask_parse(buf, attrs->cpumask); 6070 if (!ret) 6071 ret = apply_workqueue_attrs_locked(wq, attrs); 6072 6073 out_unlock: 6074 apply_wqattrs_unlock(); 6075 free_workqueue_attrs(attrs); 6076 return ret ?: count; 6077 } 6078 6079 static ssize_t wq_affn_scope_show(struct device *dev, 6080 struct device_attribute *attr, char *buf) 6081 { 6082 struct workqueue_struct *wq = dev_to_wq(dev); 6083 int written; 6084 6085 mutex_lock(&wq->mutex); 6086 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6087 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6088 wq_affn_names[WQ_AFFN_DFL], 6089 wq_affn_names[wq_affn_dfl]); 6090 else 6091 written = scnprintf(buf, PAGE_SIZE, "%s\n", 6092 wq_affn_names[wq->unbound_attrs->affn_scope]); 6093 mutex_unlock(&wq->mutex); 6094 6095 return written; 6096 } 6097 6098 static ssize_t wq_affn_scope_store(struct device *dev, 6099 struct device_attribute *attr, 6100 const char *buf, size_t count) 6101 { 6102 struct workqueue_struct *wq = dev_to_wq(dev); 6103 struct workqueue_attrs *attrs; 6104 int affn, ret = -ENOMEM; 6105 6106 affn = parse_affn_scope(buf); 6107 if (affn < 0) 6108 return affn; 6109 6110 apply_wqattrs_lock(); 6111 attrs = wq_sysfs_prep_attrs(wq); 6112 if (attrs) { 6113 attrs->affn_scope = affn; 6114 ret = apply_workqueue_attrs_locked(wq, attrs); 6115 } 6116 apply_wqattrs_unlock(); 6117 free_workqueue_attrs(attrs); 6118 return ret ?: count; 6119 } 6120 6121 static ssize_t wq_affinity_strict_show(struct device *dev, 6122 struct device_attribute *attr, char *buf) 6123 { 6124 struct workqueue_struct *wq = dev_to_wq(dev); 6125 6126 return scnprintf(buf, PAGE_SIZE, "%d\n", 6127 wq->unbound_attrs->affn_strict); 6128 } 6129 6130 static ssize_t wq_affinity_strict_store(struct device *dev, 6131 struct device_attribute *attr, 6132 const char *buf, size_t count) 6133 { 6134 struct workqueue_struct *wq = dev_to_wq(dev); 6135 struct workqueue_attrs *attrs; 6136 int v, ret = -ENOMEM; 6137 6138 if (sscanf(buf, "%d", &v) != 1) 6139 return -EINVAL; 6140 6141 apply_wqattrs_lock(); 6142 attrs = wq_sysfs_prep_attrs(wq); 6143 if (attrs) { 6144 attrs->affn_strict = (bool)v; 6145 ret = apply_workqueue_attrs_locked(wq, attrs); 6146 } 6147 apply_wqattrs_unlock(); 6148 free_workqueue_attrs(attrs); 6149 return ret ?: count; 6150 } 6151 6152 static struct device_attribute wq_sysfs_unbound_attrs[] = { 6153 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 6154 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 6155 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 6156 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 6157 __ATTR_NULL, 6158 }; 6159 6160 static struct bus_type wq_subsys = { 6161 .name = "workqueue", 6162 .dev_groups = wq_sysfs_groups, 6163 }; 6164 6165 /** 6166 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 6167 * @cpumask: the cpumask to set 6168 * 6169 * The low-level workqueues cpumask is a global 
cpumask that limits 6170 * the affinity of all unbound workqueues. This function checks the @cpumask 6171 * and applies it to all unbound workqueues and updates all of their pwqs. 6172 * 6173 * Return: 0 - Success 6174 * -EINVAL - Invalid @cpumask 6175 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 6176 */ 6177 static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 6178 { 6179 int ret = -EINVAL; 6180 6181 /* 6182 * Not excluding isolated cpus on purpose. 6183 * If the user wishes to include them, we allow that. 6184 */ 6185 cpumask_and(cpumask, cpumask, cpu_possible_mask); 6186 if (!cpumask_empty(cpumask)) { 6187 apply_wqattrs_lock(); 6188 cpumask_copy(wq_requested_unbound_cpumask, cpumask); 6189 if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 6190 ret = 0; 6191 goto out_unlock; 6192 } 6193 6194 ret = workqueue_apply_unbound_cpumask(cpumask); 6195 6196 out_unlock: 6197 apply_wqattrs_unlock(); 6198 } 6199 6200 return ret; 6201 } 6202 6203 static ssize_t __wq_cpumask_show(struct device *dev, 6204 struct device_attribute *attr, char *buf, cpumask_var_t mask) 6205 { 6206 int written; 6207 6208 mutex_lock(&wq_pool_mutex); 6209 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); 6210 mutex_unlock(&wq_pool_mutex); 6211 6212 return written; 6213 } 6214 6215 static ssize_t wq_unbound_cpumask_show(struct device *dev, 6216 struct device_attribute *attr, char *buf) 6217 { 6218 return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask); 6219 } 6220 6221 static ssize_t wq_requested_cpumask_show(struct device *dev, 6222 struct device_attribute *attr, char *buf) 6223 { 6224 return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask); 6225 } 6226 6227 static ssize_t wq_isolated_cpumask_show(struct device *dev, 6228 struct device_attribute *attr, char *buf) 6229 { 6230 return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask); 6231 } 6232 6233 static ssize_t wq_unbound_cpumask_store(struct device *dev, 6234 struct device_attribute *attr, const char *buf, size_t count) 6235 { 6236 cpumask_var_t cpumask; 6237 int ret; 6238 6239 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6240 return -ENOMEM; 6241 6242 ret = cpumask_parse(buf, cpumask); 6243 if (!ret) 6244 ret = workqueue_set_unbound_cpumask(cpumask); 6245 6246 free_cpumask_var(cpumask); 6247 return ret ?
ret : count; 6248 } 6249 6250 static struct device_attribute wq_sysfs_cpumask_attrs[] = { 6251 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6252 wq_unbound_cpumask_store), 6253 __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL), 6254 __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL), 6255 __ATTR_NULL, 6256 }; 6257 6258 static int __init wq_sysfs_init(void) 6259 { 6260 struct device *dev_root; 6261 int err; 6262 6263 err = subsys_virtual_register(&wq_subsys, NULL); 6264 if (err) 6265 return err; 6266 6267 dev_root = bus_get_dev_root(&wq_subsys); 6268 if (dev_root) { 6269 struct device_attribute *attr; 6270 6271 for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) { 6272 err = device_create_file(dev_root, attr); 6273 if (err) 6274 break; 6275 } 6276 put_device(dev_root); 6277 } 6278 return err; 6279 } 6280 core_initcall(wq_sysfs_init); 6281 6282 static void wq_device_release(struct device *dev) 6283 { 6284 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6285 6286 kfree(wq_dev); 6287 } 6288 6289 /** 6290 * workqueue_sysfs_register - make a workqueue visible in sysfs 6291 * @wq: the workqueue to register 6292 * 6293 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 6294 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 6295 * which is the preferred method. 6296 * 6297 * Workqueue user should use this function directly iff it wants to apply 6298 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 6299 * apply_workqueue_attrs() may race against userland updating the 6300 * attributes. 6301 * 6302 * Return: 0 on success, -errno on failure. 6303 */ 6304 int workqueue_sysfs_register(struct workqueue_struct *wq) 6305 { 6306 struct wq_device *wq_dev; 6307 int ret; 6308 6309 /* 6310 * Adjusting max_active or creating new pwqs by applying 6311 * attributes breaks ordering guarantee. Disallow exposing ordered 6312 * workqueues. 6313 */ 6314 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 6315 return -EINVAL; 6316 6317 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 6318 if (!wq_dev) 6319 return -ENOMEM; 6320 6321 wq_dev->wq = wq; 6322 wq_dev->dev.bus = &wq_subsys; 6323 wq_dev->dev.release = wq_device_release; 6324 dev_set_name(&wq_dev->dev, "%s", wq->name); 6325 6326 /* 6327 * unbound_attrs are created separately. Suppress uevent until 6328 * everything is ready. 6329 */ 6330 dev_set_uevent_suppress(&wq_dev->dev, true); 6331 6332 ret = device_register(&wq_dev->dev); 6333 if (ret) { 6334 put_device(&wq_dev->dev); 6335 wq->wq_dev = NULL; 6336 return ret; 6337 } 6338 6339 if (wq->flags & WQ_UNBOUND) { 6340 struct device_attribute *attr; 6341 6342 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 6343 ret = device_create_file(&wq_dev->dev, attr); 6344 if (ret) { 6345 device_unregister(&wq_dev->dev); 6346 wq->wq_dev = NULL; 6347 return ret; 6348 } 6349 } 6350 } 6351 6352 dev_set_uevent_suppress(&wq_dev->dev, false); 6353 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 6354 return 0; 6355 } 6356 6357 /** 6358 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 6359 * @wq: the workqueue to unregister 6360 * 6361 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
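 * Called from destroy_workqueue() while the workqueue is being torn down.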
6362 */ 6363 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 6364 { 6365 struct wq_device *wq_dev = wq->wq_dev; 6366 6367 if (!wq->wq_dev) 6368 return; 6369 6370 wq->wq_dev = NULL; 6371 device_unregister(&wq_dev->dev); 6372 } 6373 #else /* CONFIG_SYSFS */ 6374 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 6375 #endif /* CONFIG_SYSFS */ 6376 6377 /* 6378 * Workqueue watchdog. 6379 * 6380 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 6381 * flush dependency, a concurrency managed work item which stays RUNNING 6382 * indefinitely. Workqueue stalls can be very difficult to debug as the 6383 * usual warning mechanisms don't trigger and internal workqueue state is 6384 * largely opaque. 6385 * 6386 * Workqueue watchdog monitors all worker pools periodically and dumps 6387 * state if some pools failed to make forward progress for a while where 6388 * forward progress is defined as the first item on ->worklist changing. 6389 * 6390 * This mechanism is controlled through the kernel parameter 6391 * "workqueue.watchdog_thresh" which can be updated at runtime through the 6392 * corresponding sysfs parameter file. 6393 */ 6394 #ifdef CONFIG_WQ_WATCHDOG 6395 6396 static unsigned long wq_watchdog_thresh = 30; 6397 static struct timer_list wq_watchdog_timer; 6398 6399 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 6400 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 6401 6402 /* 6403 * Show workers that might prevent the processing of pending work items. 6404 * The only candidates are CPU-bound workers in the running state. 6405 * Pending work items should be handled by another idle worker 6406 * in all other situations. 6407 */ 6408 static void show_cpu_pool_hog(struct worker_pool *pool) 6409 { 6410 struct worker *worker; 6411 unsigned long flags; 6412 int bkt; 6413 6414 raw_spin_lock_irqsave(&pool->lock, flags); 6415 6416 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6417 if (task_is_running(worker->task)) { 6418 /* 6419 * Defer printing to avoid deadlocks in console 6420 * drivers that queue work while holding locks 6421 * also taken in their write paths. 
6422 */ 6423 printk_deferred_enter(); 6424 6425 pr_info("pool %d:\n", pool->id); 6426 sched_show_task(worker->task); 6427 6428 printk_deferred_exit(); 6429 } 6430 } 6431 6432 raw_spin_unlock_irqrestore(&pool->lock, flags); 6433 } 6434 6435 static void show_cpu_pools_hogs(void) 6436 { 6437 struct worker_pool *pool; 6438 int pi; 6439 6440 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6441 6442 rcu_read_lock(); 6443 6444 for_each_pool(pool, pi) { 6445 if (pool->cpu_stall) 6446 show_cpu_pool_hog(pool); 6447 6448 } 6449 6450 rcu_read_unlock(); 6451 } 6452 6453 static void wq_watchdog_reset_touched(void) 6454 { 6455 int cpu; 6456 6457 wq_watchdog_touched = jiffies; 6458 for_each_possible_cpu(cpu) 6459 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6460 } 6461 6462 static void wq_watchdog_timer_fn(struct timer_list *unused) 6463 { 6464 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6465 bool lockup_detected = false; 6466 bool cpu_pool_stall = false; 6467 unsigned long now = jiffies; 6468 struct worker_pool *pool; 6469 int pi; 6470 6471 if (!thresh) 6472 return; 6473 6474 rcu_read_lock(); 6475 6476 for_each_pool(pool, pi) { 6477 unsigned long pool_ts, touched, ts; 6478 6479 pool->cpu_stall = false; 6480 if (list_empty(&pool->worklist)) 6481 continue; 6482 6483 /* 6484 * If a virtual machine is stopped by the host it can look to 6485 * the watchdog like a stall. 6486 */ 6487 kvm_check_and_clear_guest_paused(); 6488 6489 /* get the latest of pool and touched timestamps */ 6490 if (pool->cpu >= 0) 6491 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 6492 else 6493 touched = READ_ONCE(wq_watchdog_touched); 6494 pool_ts = READ_ONCE(pool->watchdog_ts); 6495 6496 if (time_after(pool_ts, touched)) 6497 ts = pool_ts; 6498 else 6499 ts = touched; 6500 6501 /* did we stall? 
*/ 6502 if (time_after(now, ts + thresh)) { 6503 lockup_detected = true; 6504 if (pool->cpu >= 0) { 6505 pool->cpu_stall = true; 6506 cpu_pool_stall = true; 6507 } 6508 pr_emerg("BUG: workqueue lockup - pool"); 6509 pr_cont_pool_info(pool); 6510 pr_cont(" stuck for %us!\n", 6511 jiffies_to_msecs(now - pool_ts) / 1000); 6512 } 6513 6514 6515 } 6516 6517 rcu_read_unlock(); 6518 6519 if (lockup_detected) 6520 show_all_workqueues(); 6521 6522 if (cpu_pool_stall) 6523 show_cpu_pools_hogs(); 6524 6525 wq_watchdog_reset_touched(); 6526 mod_timer(&wq_watchdog_timer, jiffies + thresh); 6527 } 6528 6529 notrace void wq_watchdog_touch(int cpu) 6530 { 6531 if (cpu >= 0) 6532 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6533 6534 wq_watchdog_touched = jiffies; 6535 } 6536 6537 static void wq_watchdog_set_thresh(unsigned long thresh) 6538 { 6539 wq_watchdog_thresh = 0; 6540 del_timer_sync(&wq_watchdog_timer); 6541 6542 if (thresh) { 6543 wq_watchdog_thresh = thresh; 6544 wq_watchdog_reset_touched(); 6545 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 6546 } 6547 } 6548 6549 static int wq_watchdog_param_set_thresh(const char *val, 6550 const struct kernel_param *kp) 6551 { 6552 unsigned long thresh; 6553 int ret; 6554 6555 ret = kstrtoul(val, 0, &thresh); 6556 if (ret) 6557 return ret; 6558 6559 if (system_wq) 6560 wq_watchdog_set_thresh(thresh); 6561 else 6562 wq_watchdog_thresh = thresh; 6563 6564 return 0; 6565 } 6566 6567 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 6568 .set = wq_watchdog_param_set_thresh, 6569 .get = param_get_ulong, 6570 }; 6571 6572 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 6573 0644); 6574 6575 static void wq_watchdog_init(void) 6576 { 6577 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 6578 wq_watchdog_set_thresh(wq_watchdog_thresh); 6579 } 6580 6581 #else /* CONFIG_WQ_WATCHDOG */ 6582 6583 static inline void wq_watchdog_init(void) { } 6584 6585 #endif /* CONFIG_WQ_WATCHDOG */ 6586 6587 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 6588 { 6589 if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 6590 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 6591 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 6592 return; 6593 } 6594 6595 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 6596 } 6597 6598 /** 6599 * workqueue_init_early - early init for workqueue subsystem 6600 * 6601 * This is the first step of three-staged workqueue subsystem initialization and 6602 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 6603 * up. It sets up all the data structures and system workqueues and allows early 6604 * boot code to create workqueues and queue/cancel work items. Actual work item 6605 * execution starts only after kthreads can be created and scheduled right 6606 * before early initcalls. 
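 *
 * For reference, the three stages in boot order are:
 *
 *	workqueue_init_early()    - set up data structures and system wqs
 *	workqueue_init()          - create the initial kworkers
 *	workqueue_init_topology() - initialize CPU pods for unbound wqs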
6607 */ 6608 void __init workqueue_init_early(void) 6609 { 6610 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 6611 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 6612 int i, cpu; 6613 6614 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6615 6616 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6617 BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL)); 6618 BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL)); 6619 6620 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 6621 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 6622 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 6623 if (!cpumask_empty(&wq_cmdline_cpumask)) 6624 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 6625 6626 cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask); 6627 6628 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6629 6630 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6631 BUG_ON(!wq_update_pod_attrs_buf); 6632 6633 /* initialize WQ_AFFN_SYSTEM pods */ 6634 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6635 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 6636 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6637 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 6638 6639 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 6640 6641 pt->nr_pods = 1; 6642 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 6643 pt->pod_node[0] = NUMA_NO_NODE; 6644 pt->cpu_pod[0] = 0; 6645 6646 /* initialize CPU pools */ 6647 for_each_possible_cpu(cpu) { 6648 struct worker_pool *pool; 6649 6650 i = 0; 6651 for_each_cpu_worker_pool(pool, cpu) { 6652 BUG_ON(init_worker_pool(pool)); 6653 pool->cpu = cpu; 6654 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 6655 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 6656 pool->attrs->nice = std_nice[i++]; 6657 pool->attrs->affn_strict = true; 6658 pool->node = cpu_to_node(cpu); 6659 6660 /* alloc pool ID */ 6661 mutex_lock(&wq_pool_mutex); 6662 BUG_ON(worker_pool_assign_id(pool)); 6663 mutex_unlock(&wq_pool_mutex); 6664 } 6665 } 6666 6667 /* create default unbound and ordered wq attrs */ 6668 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 6669 struct workqueue_attrs *attrs; 6670 6671 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6672 attrs->nice = std_nice[i]; 6673 unbound_std_wq_attrs[i] = attrs; 6674 6675 /* 6676 * An ordered wq should have only one pwq as ordering is 6677 * guaranteed by max_active which is enforced by pwqs. 
6678 */ 6679 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6680 attrs->nice = std_nice[i]; 6681 attrs->ordered = true; 6682 ordered_wq_attrs[i] = attrs; 6683 } 6684 6685 system_wq = alloc_workqueue("events", 0, 0); 6686 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6687 system_long_wq = alloc_workqueue("events_long", 0, 0); 6688 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6689 WQ_MAX_ACTIVE); 6690 system_freezable_wq = alloc_workqueue("events_freezable", 6691 WQ_FREEZABLE, 0); 6692 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 6693 WQ_POWER_EFFICIENT, 0); 6694 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 6695 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 6696 0); 6697 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 6698 !system_unbound_wq || !system_freezable_wq || 6699 !system_power_efficient_wq || 6700 !system_freezable_power_efficient_wq); 6701 } 6702 6703 static void __init wq_cpu_intensive_thresh_init(void) 6704 { 6705 unsigned long thresh; 6706 unsigned long bogo; 6707 6708 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6709 BUG_ON(IS_ERR(pwq_release_worker)); 6710 6711 /* if the user set it to a specific value, keep it */ 6712 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6713 return; 6714 6715 /* 6716 * The default of 10ms is derived from the fact that most modern (as of 6717 * 2023) processors can do a lot in 10ms and that it's just below what 6718 * most consider human-perceivable. However, the kernel also runs on a 6719 * lot slower CPUs including microcontrollers where the threshold is way 6720 * too low. 6721 * 6722 * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6723 * This is by no means accurate but it doesn't have to be. The mechanism 6724 * is still useful even when the threshold is fully scaled up. Also, as 6725 * the reports would usually be applicable to everyone, some machines 6726 * operating on longer thresholds won't significantly diminish their 6727 * usefulness. 6728 */ 6729 thresh = 10 * USEC_PER_MSEC; 6730 6731 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6732 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6733 if (bogo < 4000) 6734 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6735 6736 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6737 loops_per_jiffy, bogo, thresh); 6738 6739 wq_cpu_intensive_thresh_us = thresh; 6740 } 6741 6742 /** 6743 * workqueue_init - bring workqueue subsystem fully online 6744 * 6745 * This is the second step of three-staged workqueue subsystem initialization 6746 * and invoked as soon as kthreads can be created and scheduled. Workqueues have 6747 * been created and work items queued on them, but there are no kworkers 6748 * executing the work items yet. Populate the worker pools with the initial 6749 * workers and enable future kworker creations. 6750 */ 6751 void __init workqueue_init(void) 6752 { 6753 struct workqueue_struct *wq; 6754 struct worker_pool *pool; 6755 int cpu, bkt; 6756 6757 wq_cpu_intensive_thresh_init(); 6758 6759 mutex_lock(&wq_pool_mutex); 6760 6761 /* 6762 * Per-cpu pools created earlier could be missing node hint. Fix them 6763 * up. Also, create a rescuer for workqueues that requested it. 
6764 */ 6765 for_each_possible_cpu(cpu) { 6766 for_each_cpu_worker_pool(pool, cpu) { 6767 pool->node = cpu_to_node(cpu); 6768 } 6769 } 6770 6771 list_for_each_entry(wq, &workqueues, list) { 6772 WARN(init_rescuer(wq), 6773 "workqueue: failed to create early rescuer for %s", 6774 wq->name); 6775 } 6776 6777 mutex_unlock(&wq_pool_mutex); 6778 6779 /* create the initial workers */ 6780 for_each_online_cpu(cpu) { 6781 for_each_cpu_worker_pool(pool, cpu) { 6782 pool->flags &= ~POOL_DISASSOCIATED; 6783 BUG_ON(!create_worker(pool)); 6784 } 6785 } 6786 6787 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 6788 BUG_ON(!create_worker(pool)); 6789 6790 wq_online = true; 6791 wq_watchdog_init(); 6792 } 6793 6794 /* 6795 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 6796 * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique 6797 * and consecutive pod ID. The rest of @pt is initialized accordingly. 6798 */ 6799 static void __init init_pod_type(struct wq_pod_type *pt, 6800 bool (*cpus_share_pod)(int, int)) 6801 { 6802 int cur, pre, cpu, pod; 6803 6804 pt->nr_pods = 0; 6805 6806 /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 6807 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6808 BUG_ON(!pt->cpu_pod); 6809 6810 for_each_possible_cpu(cur) { 6811 for_each_possible_cpu(pre) { 6812 if (pre >= cur) { 6813 pt->cpu_pod[cur] = pt->nr_pods++; 6814 break; 6815 } 6816 if (cpus_share_pod(cur, pre)) { 6817 pt->cpu_pod[cur] = pt->cpu_pod[pre]; 6818 break; 6819 } 6820 } 6821 } 6822 6823 /* init the rest to match @pt->cpu_pod[] */ 6824 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6825 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 6826 BUG_ON(!pt->pod_cpus || !pt->pod_node); 6827 6828 for (pod = 0; pod < pt->nr_pods; pod++) 6829 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 6830 6831 for_each_possible_cpu(cpu) { 6832 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 6833 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 6834 } 6835 } 6836 6837 static bool __init cpus_dont_share(int cpu0, int cpu1) 6838 { 6839 return false; 6840 } 6841 6842 static bool __init cpus_share_smt(int cpu0, int cpu1) 6843 { 6844 #ifdef CONFIG_SCHED_SMT 6845 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 6846 #else 6847 return false; 6848 #endif 6849 } 6850 6851 static bool __init cpus_share_numa(int cpu0, int cpu1) 6852 { 6853 return cpu_to_node(cpu0) == cpu_to_node(cpu1); 6854 } 6855 6856 /** 6857 * workqueue_init_topology - initialize CPU pods for unbound workqueues 6858 * 6859 * This is the third step of there-staged workqueue subsystem initialization and 6860 * invoked after SMP and topology information are fully initialized. It 6861 * initializes the unbound CPU pods accordingly. 6862 */ 6863 void __init workqueue_init_topology(void) 6864 { 6865 struct workqueue_struct *wq; 6866 int cpu; 6867 6868 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 6869 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 6870 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 6871 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 6872 6873 mutex_lock(&wq_pool_mutex); 6874 6875 /* 6876 * Workqueues allocated earlier would have all CPUs sharing the default 6877 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 6878 * combinations to apply per-pod sharing. 
6879 */ 6880 list_for_each_entry(wq, &workqueues, list) { 6881 for_each_online_cpu(cpu) { 6882 wq_update_pod(wq, cpu, cpu, true); 6883 } 6884 } 6885 6886 mutex_unlock(&wq_pool_mutex); 6887 } 6888 6889 void __warn_flushing_systemwide_wq(void) 6890 { 6891 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 6892 dump_stack(); 6893 } 6894 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6895 6896 static int __init workqueue_unbound_cpus_setup(char *str) 6897 { 6898 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6899 cpumask_clear(&wq_cmdline_cpumask); 6900 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6901 } 6902 6903 return 1; 6904 } 6905 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6906