/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.
Disabling preemption is enough and should 103 * only be modified and accessed from the local cpu. 104 * 105 * L: gcwq->lock protected. Access with gcwq->lock held. 106 * 107 * X: During normal operation, modification requires gcwq->lock and 108 * should be done only from local cpu. Either disabling preemption 109 * on local cpu or grabbing gcwq->lock is enough for read access. 110 * If GCWQ_DISASSOCIATED is set, it's identical to L. 111 * 112 * F: wq->flush_mutex protected. 113 * 114 * W: workqueue_lock protected. 115 */ 116 117 struct global_cwq; 118 119 /* 120 * The poor guys doing the actual heavy lifting. All on-duty workers 121 * are either serving the manager role, on idle list or on busy hash. 122 */ 123 struct worker { 124 /* on idle list while idle, on busy hash table while busy */ 125 union { 126 struct list_head entry; /* L: while idle */ 127 struct hlist_node hentry; /* L: while busy */ 128 }; 129 130 struct work_struct *current_work; /* L: work being processed */ 131 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ 132 struct list_head scheduled; /* L: scheduled works */ 133 struct task_struct *task; /* I: worker task */ 134 struct global_cwq *gcwq; /* I: the associated gcwq */ 135 /* 64 bytes boundary on 64bit, 32 on 32bit */ 136 unsigned long last_active; /* L: last active timestamp */ 137 unsigned int flags; /* X: flags */ 138 int id; /* I: worker id */ 139 struct work_struct rebind_work; /* L: rebind worker to cpu */ 140 }; 141 142 /* 143 * Global per-cpu workqueue. There's one and only one for each cpu 144 * and all works are queued and processed here regardless of their 145 * target workqueues. 146 */ 147 struct global_cwq { 148 spinlock_t lock; /* the gcwq lock */ 149 struct list_head worklist; /* L: list of pending works */ 150 unsigned int cpu; /* I: the associated cpu */ 151 unsigned int flags; /* L: GCWQ_* flags */ 152 153 int nr_workers; /* L: total number of workers */ 154 int nr_idle; /* L: currently idle ones */ 155 156 /* workers are chained either in the idle_list or busy_hash */ 157 struct list_head idle_list; /* X: list of idle workers */ 158 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; 159 /* L: hash of busy workers */ 160 161 struct timer_list idle_timer; /* L: worker idle timeout */ 162 struct timer_list mayday_timer; /* L: SOS timer for dworkers */ 163 164 struct ida worker_ida; /* L: for worker IDs */ 165 166 struct task_struct *trustee; /* L: for gcwq shutdown */ 167 unsigned int trustee_state; /* L: trustee state */ 168 wait_queue_head_t trustee_wait; /* trustee wait */ 169 struct worker *first_idle; /* L: first idle worker */ 170 } ____cacheline_aligned_in_smp; 171 172 /* 173 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of 174 * work_struct->data are used for flags and thus cwqs need to be 175 * aligned at two's power of the number of flag bits. 176 */ 177 struct cpu_workqueue_struct { 178 struct global_cwq *gcwq; /* I: the associated gcwq */ 179 struct workqueue_struct *wq; /* I: the owning workqueue */ 180 int work_color; /* L: current color */ 181 int flush_color; /* L: flushing color */ 182 int nr_in_flight[WORK_NR_COLORS]; 183 /* L: nr of in_flight works */ 184 int nr_active; /* L: nr of active works */ 185 int max_active; /* L: max active works */ 186 struct list_head delayed_works; /* L: delayed works */ 187 }; 188 189 /* 190 * Structure used to wait for workqueue flush. 
191 */ 192 struct wq_flusher { 193 struct list_head list; /* F: list of flushers */ 194 int flush_color; /* F: flush color waiting for */ 195 struct completion done; /* flush completion */ 196 }; 197 198 /* 199 * All cpumasks are assumed to be always set on UP and thus can't be 200 * used to determine whether there's something to be done. 201 */ 202 #ifdef CONFIG_SMP 203 typedef cpumask_var_t mayday_mask_t; 204 #define mayday_test_and_set_cpu(cpu, mask) \ 205 cpumask_test_and_set_cpu((cpu), (mask)) 206 #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) 207 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) 208 #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) 209 #define free_mayday_mask(mask) free_cpumask_var((mask)) 210 #else 211 typedef unsigned long mayday_mask_t; 212 #define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask)) 213 #define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask)) 214 #define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask)) 215 #define alloc_mayday_mask(maskp, gfp) true 216 #define free_mayday_mask(mask) do { } while (0) 217 #endif 218 219 /* 220 * The externally visible workqueue abstraction is an array of 221 * per-CPU workqueues: 222 */ 223 struct workqueue_struct { 224 unsigned int flags; /* I: WQ_* flags */ 225 union { 226 struct cpu_workqueue_struct __percpu *pcpu; 227 struct cpu_workqueue_struct *single; 228 unsigned long v; 229 } cpu_wq; /* I: cwq's */ 230 struct list_head list; /* W: list of all workqueues */ 231 232 struct mutex flush_mutex; /* protects wq flushing */ 233 int work_color; /* F: current work color */ 234 int flush_color; /* F: current flush color */ 235 atomic_t nr_cwqs_to_flush; /* flush in progress */ 236 struct wq_flusher *first_flusher; /* F: first flusher */ 237 struct list_head flusher_queue; /* F: flush waiters */ 238 struct list_head flusher_overflow; /* F: flush overflow list */ 239 240 mayday_mask_t mayday_mask; /* cpus requesting rescue */ 241 struct worker *rescuer; /* I: rescue worker */ 242 243 int saved_max_active; /* W: saved cwq max_active */ 244 const char *name; /* I: workqueue name */ 245 #ifdef CONFIG_LOCKDEP 246 struct lockdep_map lockdep_map; 247 #endif 248 }; 249 250 struct workqueue_struct *system_wq __read_mostly; 251 struct workqueue_struct *system_long_wq __read_mostly; 252 struct workqueue_struct *system_nrt_wq __read_mostly; 253 struct workqueue_struct *system_unbound_wq __read_mostly; 254 struct workqueue_struct *system_freezable_wq __read_mostly; 255 EXPORT_SYMBOL_GPL(system_wq); 256 EXPORT_SYMBOL_GPL(system_long_wq); 257 EXPORT_SYMBOL_GPL(system_nrt_wq); 258 EXPORT_SYMBOL_GPL(system_unbound_wq); 259 EXPORT_SYMBOL_GPL(system_freezable_wq); 260 261 #define CREATE_TRACE_POINTS 262 #include <trace/events/workqueue.h> 263 264 #define for_each_busy_worker(worker, i, pos, gcwq) \ 265 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ 266 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) 267 268 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, 269 unsigned int sw) 270 { 271 if (cpu < nr_cpu_ids) { 272 if (sw & 1) { 273 cpu = cpumask_next(cpu, mask); 274 if (cpu < nr_cpu_ids) 275 return cpu; 276 } 277 if (sw & 2) 278 return WORK_CPU_UNBOUND; 279 } 280 return WORK_CPU_NONE; 281 } 282 283 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 284 struct workqueue_struct *wq) 285 { 286 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 
1 : 2); 287 } 288 289 /* 290 * CPU iterators 291 * 292 * An extra gcwq is defined for an invalid cpu number 293 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any 294 * specific CPU. The following iterators are similar to 295 * for_each_*_cpu() iterators but also considers the unbound gcwq. 296 * 297 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND 298 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND 299 * for_each_cwq_cpu() : possible CPUs for bound workqueues, 300 * WORK_CPU_UNBOUND for unbound workqueues 301 */ 302 #define for_each_gcwq_cpu(cpu) \ 303 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ 304 (cpu) < WORK_CPU_NONE; \ 305 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) 306 307 #define for_each_online_gcwq_cpu(cpu) \ 308 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \ 309 (cpu) < WORK_CPU_NONE; \ 310 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) 311 312 #define for_each_cwq_cpu(cpu, wq) \ 313 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \ 314 (cpu) < WORK_CPU_NONE; \ 315 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) 316 317 #ifdef CONFIG_DEBUG_OBJECTS_WORK 318 319 static struct debug_obj_descr work_debug_descr; 320 321 static void *work_debug_hint(void *addr) 322 { 323 return ((struct work_struct *) addr)->func; 324 } 325 326 /* 327 * fixup_init is called when: 328 * - an active object is initialized 329 */ 330 static int work_fixup_init(void *addr, enum debug_obj_state state) 331 { 332 struct work_struct *work = addr; 333 334 switch (state) { 335 case ODEBUG_STATE_ACTIVE: 336 cancel_work_sync(work); 337 debug_object_init(work, &work_debug_descr); 338 return 1; 339 default: 340 return 0; 341 } 342 } 343 344 /* 345 * fixup_activate is called when: 346 * - an active object is activated 347 * - an unknown object is activated (might be a statically initialized object) 348 */ 349 static int work_fixup_activate(void *addr, enum debug_obj_state state) 350 { 351 struct work_struct *work = addr; 352 353 switch (state) { 354 355 case ODEBUG_STATE_NOTAVAILABLE: 356 /* 357 * This is not really a fixup. The work struct was 358 * statically initialized. We just make sure that it 359 * is tracked in the object tracker. 
360 */ 361 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { 362 debug_object_init(work, &work_debug_descr); 363 debug_object_activate(work, &work_debug_descr); 364 return 0; 365 } 366 WARN_ON_ONCE(1); 367 return 0; 368 369 case ODEBUG_STATE_ACTIVE: 370 WARN_ON(1); 371 372 default: 373 return 0; 374 } 375 } 376 377 /* 378 * fixup_free is called when: 379 * - an active object is freed 380 */ 381 static int work_fixup_free(void *addr, enum debug_obj_state state) 382 { 383 struct work_struct *work = addr; 384 385 switch (state) { 386 case ODEBUG_STATE_ACTIVE: 387 cancel_work_sync(work); 388 debug_object_free(work, &work_debug_descr); 389 return 1; 390 default: 391 return 0; 392 } 393 } 394 395 static struct debug_obj_descr work_debug_descr = { 396 .name = "work_struct", 397 .debug_hint = work_debug_hint, 398 .fixup_init = work_fixup_init, 399 .fixup_activate = work_fixup_activate, 400 .fixup_free = work_fixup_free, 401 }; 402 403 static inline void debug_work_activate(struct work_struct *work) 404 { 405 debug_object_activate(work, &work_debug_descr); 406 } 407 408 static inline void debug_work_deactivate(struct work_struct *work) 409 { 410 debug_object_deactivate(work, &work_debug_descr); 411 } 412 413 void __init_work(struct work_struct *work, int onstack) 414 { 415 if (onstack) 416 debug_object_init_on_stack(work, &work_debug_descr); 417 else 418 debug_object_init(work, &work_debug_descr); 419 } 420 EXPORT_SYMBOL_GPL(__init_work); 421 422 void destroy_work_on_stack(struct work_struct *work) 423 { 424 debug_object_free(work, &work_debug_descr); 425 } 426 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 427 428 #else 429 static inline void debug_work_activate(struct work_struct *work) { } 430 static inline void debug_work_deactivate(struct work_struct *work) { } 431 #endif 432 433 /* Serializes the accesses to the list of workqueues. */ 434 static DEFINE_SPINLOCK(workqueue_lock); 435 static LIST_HEAD(workqueues); 436 static bool workqueue_freezing; /* W: have wqs started freezing? */ 437 438 /* 439 * The almighty global cpu workqueues. nr_running is the only field 440 * which is expected to be used frequently by other cpus via 441 * try_to_wake_up(). Put it in a separate cacheline. 442 */ 443 static DEFINE_PER_CPU(struct global_cwq, global_cwq); 444 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); 445 446 /* 447 * Global cpu workqueue and nr_running counter for unbound gcwq. The 448 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its 449 * workers have WORKER_UNBOUND set. 
450 */ 451 static struct global_cwq unbound_global_cwq; 452 static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */ 453 454 static int worker_thread(void *__worker); 455 456 static struct global_cwq *get_gcwq(unsigned int cpu) 457 { 458 if (cpu != WORK_CPU_UNBOUND) 459 return &per_cpu(global_cwq, cpu); 460 else 461 return &unbound_global_cwq; 462 } 463 464 static atomic_t *get_gcwq_nr_running(unsigned int cpu) 465 { 466 if (cpu != WORK_CPU_UNBOUND) 467 return &per_cpu(gcwq_nr_running, cpu); 468 else 469 return &unbound_gcwq_nr_running; 470 } 471 472 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 473 struct workqueue_struct *wq) 474 { 475 if (!(wq->flags & WQ_UNBOUND)) { 476 if (likely(cpu < nr_cpu_ids)) { 477 #ifdef CONFIG_SMP 478 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); 479 #else 480 return wq->cpu_wq.single; 481 #endif 482 } 483 } else if (likely(cpu == WORK_CPU_UNBOUND)) 484 return wq->cpu_wq.single; 485 return NULL; 486 } 487 488 static unsigned int work_color_to_flags(int color) 489 { 490 return color << WORK_STRUCT_COLOR_SHIFT; 491 } 492 493 static int get_work_color(struct work_struct *work) 494 { 495 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 496 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 497 } 498 499 static int work_next_color(int color) 500 { 501 return (color + 1) % WORK_NR_COLORS; 502 } 503 504 /* 505 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the 506 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is 507 * cleared and the work data contains the cpu number it was last on. 508 * 509 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the 510 * cwq, cpu or clear work->data. These functions should only be 511 * called while the work is owned - ie. while the PENDING bit is set. 512 * 513 * get_work_[g]cwq() can be used to obtain the gcwq or cwq 514 * corresponding to a work. gcwq is available once the work has been 515 * queued anywhere after initialization. cwq is available only from 516 * queueing until execution starts. 
517 */ 518 static inline void set_work_data(struct work_struct *work, unsigned long data, 519 unsigned long flags) 520 { 521 BUG_ON(!work_pending(work)); 522 atomic_long_set(&work->data, data | flags | work_static(work)); 523 } 524 525 static void set_work_cwq(struct work_struct *work, 526 struct cpu_workqueue_struct *cwq, 527 unsigned long extra_flags) 528 { 529 set_work_data(work, (unsigned long)cwq, 530 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 531 } 532 533 static void set_work_cpu(struct work_struct *work, unsigned int cpu) 534 { 535 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); 536 } 537 538 static void clear_work_data(struct work_struct *work) 539 { 540 set_work_data(work, WORK_STRUCT_NO_CPU, 0); 541 } 542 543 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) 544 { 545 unsigned long data = atomic_long_read(&work->data); 546 547 if (data & WORK_STRUCT_CWQ) 548 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 549 else 550 return NULL; 551 } 552 553 static struct global_cwq *get_work_gcwq(struct work_struct *work) 554 { 555 unsigned long data = atomic_long_read(&work->data); 556 unsigned int cpu; 557 558 if (data & WORK_STRUCT_CWQ) 559 return ((struct cpu_workqueue_struct *) 560 (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq; 561 562 cpu = data >> WORK_STRUCT_FLAG_BITS; 563 if (cpu == WORK_CPU_NONE) 564 return NULL; 565 566 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); 567 return get_gcwq(cpu); 568 } 569 570 /* 571 * Policy functions. These define the policies on how the global 572 * worker pool is managed. Unless noted otherwise, these functions 573 * assume that they're being called with gcwq->lock held. 574 */ 575 576 static bool __need_more_worker(struct global_cwq *gcwq) 577 { 578 return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) || 579 gcwq->flags & GCWQ_HIGHPRI_PENDING; 580 } 581 582 /* 583 * Need to wake up a worker? Called from anything but currently 584 * running workers. 585 */ 586 static bool need_more_worker(struct global_cwq *gcwq) 587 { 588 return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq); 589 } 590 591 /* Can I start working? Called from busy but !running workers. */ 592 static bool may_start_working(struct global_cwq *gcwq) 593 { 594 return gcwq->nr_idle; 595 } 596 597 /* Do I need to keep working? Called from currently running workers. */ 598 static bool keep_working(struct global_cwq *gcwq) 599 { 600 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); 601 602 return !list_empty(&gcwq->worklist) && 603 (atomic_read(nr_running) <= 1 || 604 gcwq->flags & GCWQ_HIGHPRI_PENDING); 605 } 606 607 /* Do we need a new worker? Called from manager. */ 608 static bool need_to_create_worker(struct global_cwq *gcwq) 609 { 610 return need_more_worker(gcwq) && !may_start_working(gcwq); 611 } 612 613 /* Do I need to be the manager? */ 614 static bool need_to_manage_workers(struct global_cwq *gcwq) 615 { 616 return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS; 617 } 618 619 /* Do we have too many workers and should some go away? */ 620 static bool too_many_workers(struct global_cwq *gcwq) 621 { 622 bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; 623 int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */ 624 int nr_busy = gcwq->nr_workers - nr_idle; 625 626 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 627 } 628 629 /* 630 * Wake up functions. 631 */ 632 633 /* Return the first worker. 
Safe with preemption disabled */ 634 static struct worker *first_worker(struct global_cwq *gcwq) 635 { 636 if (unlikely(list_empty(&gcwq->idle_list))) 637 return NULL; 638 639 return list_first_entry(&gcwq->idle_list, struct worker, entry); 640 } 641 642 /** 643 * wake_up_worker - wake up an idle worker 644 * @gcwq: gcwq to wake worker for 645 * 646 * Wake up the first idle worker of @gcwq. 647 * 648 * CONTEXT: 649 * spin_lock_irq(gcwq->lock). 650 */ 651 static void wake_up_worker(struct global_cwq *gcwq) 652 { 653 struct worker *worker = first_worker(gcwq); 654 655 if (likely(worker)) 656 wake_up_process(worker->task); 657 } 658 659 /** 660 * wq_worker_waking_up - a worker is waking up 661 * @task: task waking up 662 * @cpu: CPU @task is waking up to 663 * 664 * This function is called during try_to_wake_up() when a worker is 665 * being awoken. 666 * 667 * CONTEXT: 668 * spin_lock_irq(rq->lock) 669 */ 670 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) 671 { 672 struct worker *worker = kthread_data(task); 673 674 if (!(worker->flags & WORKER_NOT_RUNNING)) 675 atomic_inc(get_gcwq_nr_running(cpu)); 676 } 677 678 /** 679 * wq_worker_sleeping - a worker is going to sleep 680 * @task: task going to sleep 681 * @cpu: CPU in question, must be the current CPU number 682 * 683 * This function is called during schedule() when a busy worker is 684 * going to sleep. Worker on the same cpu can be woken up by 685 * returning pointer to its task. 686 * 687 * CONTEXT: 688 * spin_lock_irq(rq->lock) 689 * 690 * RETURNS: 691 * Worker task on @cpu to wake up, %NULL if none. 692 */ 693 struct task_struct *wq_worker_sleeping(struct task_struct *task, 694 unsigned int cpu) 695 { 696 struct worker *worker = kthread_data(task), *to_wakeup = NULL; 697 struct global_cwq *gcwq = get_gcwq(cpu); 698 atomic_t *nr_running = get_gcwq_nr_running(cpu); 699 700 if (worker->flags & WORKER_NOT_RUNNING) 701 return NULL; 702 703 /* this can only happen on the local cpu */ 704 BUG_ON(cpu != raw_smp_processor_id()); 705 706 /* 707 * The counterpart of the following dec_and_test, implied mb, 708 * worklist not empty test sequence is in insert_work(). 709 * Please read comment there. 710 * 711 * NOT_RUNNING is clear. This means that trustee is not in 712 * charge and we're running on the local cpu w/ rq lock held 713 * and preemption disabled, which in turn means that none else 714 * could be manipulating idle_list, so dereferencing idle_list 715 * without gcwq lock is safe. 716 */ 717 if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) 718 to_wakeup = first_worker(gcwq); 719 return to_wakeup ? to_wakeup->task : NULL; 720 } 721 722 /** 723 * worker_set_flags - set worker flags and adjust nr_running accordingly 724 * @worker: self 725 * @flags: flags to set 726 * @wakeup: wakeup an idle worker if necessary 727 * 728 * Set @flags in @worker->flags and adjust nr_running accordingly. If 729 * nr_running becomes zero and @wakeup is %true, an idle worker is 730 * woken up. 731 * 732 * CONTEXT: 733 * spin_lock_irq(gcwq->lock) 734 */ 735 static inline void worker_set_flags(struct worker *worker, unsigned int flags, 736 bool wakeup) 737 { 738 struct global_cwq *gcwq = worker->gcwq; 739 740 WARN_ON_ONCE(worker->task != current); 741 742 /* 743 * If transitioning into NOT_RUNNING, adjust nr_running and 744 * wake up an idle worker as necessary if requested by 745 * @wakeup. 
746 */ 747 if ((flags & WORKER_NOT_RUNNING) && 748 !(worker->flags & WORKER_NOT_RUNNING)) { 749 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); 750 751 if (wakeup) { 752 if (atomic_dec_and_test(nr_running) && 753 !list_empty(&gcwq->worklist)) 754 wake_up_worker(gcwq); 755 } else 756 atomic_dec(nr_running); 757 } 758 759 worker->flags |= flags; 760 } 761 762 /** 763 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 764 * @worker: self 765 * @flags: flags to clear 766 * 767 * Clear @flags in @worker->flags and adjust nr_running accordingly. 768 * 769 * CONTEXT: 770 * spin_lock_irq(gcwq->lock) 771 */ 772 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 773 { 774 struct global_cwq *gcwq = worker->gcwq; 775 unsigned int oflags = worker->flags; 776 777 WARN_ON_ONCE(worker->task != current); 778 779 worker->flags &= ~flags; 780 781 /* 782 * If transitioning out of NOT_RUNNING, increment nr_running. Note 783 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 784 * of multiple flags, not a single flag. 785 */ 786 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 787 if (!(worker->flags & WORKER_NOT_RUNNING)) 788 atomic_inc(get_gcwq_nr_running(gcwq->cpu)); 789 } 790 791 /** 792 * busy_worker_head - return the busy hash head for a work 793 * @gcwq: gcwq of interest 794 * @work: work to be hashed 795 * 796 * Return hash head of @gcwq for @work. 797 * 798 * CONTEXT: 799 * spin_lock_irq(gcwq->lock). 800 * 801 * RETURNS: 802 * Pointer to the hash head. 803 */ 804 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, 805 struct work_struct *work) 806 { 807 const int base_shift = ilog2(sizeof(struct work_struct)); 808 unsigned long v = (unsigned long)work; 809 810 /* simple shift and fold hash, do we need something better? */ 811 v >>= base_shift; 812 v += v >> BUSY_WORKER_HASH_ORDER; 813 v &= BUSY_WORKER_HASH_MASK; 814 815 return &gcwq->busy_hash[v]; 816 } 817 818 /** 819 * __find_worker_executing_work - find worker which is executing a work 820 * @gcwq: gcwq of interest 821 * @bwh: hash head as returned by busy_worker_head() 822 * @work: work to find worker for 823 * 824 * Find a worker which is executing @work on @gcwq. @bwh should be 825 * the hash head obtained by calling busy_worker_head() with the same 826 * work. 827 * 828 * CONTEXT: 829 * spin_lock_irq(gcwq->lock). 830 * 831 * RETURNS: 832 * Pointer to worker which is executing @work if found, NULL 833 * otherwise. 834 */ 835 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, 836 struct hlist_head *bwh, 837 struct work_struct *work) 838 { 839 struct worker *worker; 840 struct hlist_node *tmp; 841 842 hlist_for_each_entry(worker, tmp, bwh, hentry) 843 if (worker->current_work == work) 844 return worker; 845 return NULL; 846 } 847 848 /** 849 * find_worker_executing_work - find worker which is executing a work 850 * @gcwq: gcwq of interest 851 * @work: work to find worker for 852 * 853 * Find a worker which is executing @work on @gcwq. This function is 854 * identical to __find_worker_executing_work() except that this 855 * function calculates @bwh itself. 856 * 857 * CONTEXT: 858 * spin_lock_irq(gcwq->lock). 859 * 860 * RETURNS: 861 * Pointer to worker which is executing @work if found, NULL 862 * otherwise. 
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.  This is rather expensive and should only be used from
 * cold paths.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	return false;
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DYING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(cpu, cwq, work);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
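/*
 * Illustrative sketch (kept under #if 0, not part of the original file):
 * typical use of the queueing API above from caller code.  The names
 * my_driver_handler, my_driver_work and my_wq are hypothetical; only the
 * public interface declared in <linux/workqueue.h> is assumed.
 */
#if 0
static void my_driver_handler(struct work_struct *work)
{
	/* runs later, in process context, on one of the pool workers */
	pr_info("work item executed\n");
}

static DECLARE_WORK(my_driver_work, my_driver_handler);

static int my_driver_submit(struct workqueue_struct *my_wq)
{
	/*
	 * queue_work() uses the submitting CPU's gcwq; queue_work_on()
	 * would pin the work to an explicit CPU.  Both return non-zero
	 * only if the work was not already pending.
	 */
	if (!queue_work(my_wq, &my_driver_work))
		return -EBUSY;	/* already pending, nothing queued */
	return 0;
}
#endif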
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
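/*
 * Illustrative sketch (kept under #if 0, not part of the original file):
 * deferring and periodically re-arming a work item with the delayed-work
 * API above.  my_poll_fn and my_poll_dwork are hypothetical names; only
 * <linux/workqueue.h>, system_wq and msecs_to_jiffies() are assumed.
 */
#if 0
static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* do the periodic job, then re-arm to run again in one second */
	queue_delayed_work(system_wq, dwork, msecs_to_jiffies(1000));
}

static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);
#endif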
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/* CPU has come up in between, retry migration */
		cpu_relax();
	}
}

/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	if (!on_unbound_cpu)
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
1381 */ 1382 if (bind && !on_unbound_cpu) 1383 kthread_bind(worker->task, gcwq->cpu); 1384 else { 1385 worker->task->flags |= PF_THREAD_BOUND; 1386 if (on_unbound_cpu) 1387 worker->flags |= WORKER_UNBOUND; 1388 } 1389 1390 return worker; 1391 fail: 1392 if (id >= 0) { 1393 spin_lock_irq(&gcwq->lock); 1394 ida_remove(&gcwq->worker_ida, id); 1395 spin_unlock_irq(&gcwq->lock); 1396 } 1397 kfree(worker); 1398 return NULL; 1399 } 1400 1401 /** 1402 * start_worker - start a newly created worker 1403 * @worker: worker to start 1404 * 1405 * Make the gcwq aware of @worker and start it. 1406 * 1407 * CONTEXT: 1408 * spin_lock_irq(gcwq->lock). 1409 */ 1410 static void start_worker(struct worker *worker) 1411 { 1412 worker->flags |= WORKER_STARTED; 1413 worker->gcwq->nr_workers++; 1414 worker_enter_idle(worker); 1415 wake_up_process(worker->task); 1416 } 1417 1418 /** 1419 * destroy_worker - destroy a workqueue worker 1420 * @worker: worker to be destroyed 1421 * 1422 * Destroy @worker and adjust @gcwq stats accordingly. 1423 * 1424 * CONTEXT: 1425 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1426 */ 1427 static void destroy_worker(struct worker *worker) 1428 { 1429 struct global_cwq *gcwq = worker->gcwq; 1430 int id = worker->id; 1431 1432 /* sanity check frenzy */ 1433 BUG_ON(worker->current_work); 1434 BUG_ON(!list_empty(&worker->scheduled)); 1435 1436 if (worker->flags & WORKER_STARTED) 1437 gcwq->nr_workers--; 1438 if (worker->flags & WORKER_IDLE) 1439 gcwq->nr_idle--; 1440 1441 list_del_init(&worker->entry); 1442 worker->flags |= WORKER_DIE; 1443 1444 spin_unlock_irq(&gcwq->lock); 1445 1446 kthread_stop(worker->task); 1447 kfree(worker); 1448 1449 spin_lock_irq(&gcwq->lock); 1450 ida_remove(&gcwq->worker_ida, id); 1451 } 1452 1453 static void idle_worker_timeout(unsigned long __gcwq) 1454 { 1455 struct global_cwq *gcwq = (void *)__gcwq; 1456 1457 spin_lock_irq(&gcwq->lock); 1458 1459 if (too_many_workers(gcwq)) { 1460 struct worker *worker; 1461 unsigned long expires; 1462 1463 /* idle_list is kept in LIFO order, check the last one */ 1464 worker = list_entry(gcwq->idle_list.prev, struct worker, entry); 1465 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 1466 1467 if (time_before(jiffies, expires)) 1468 mod_timer(&gcwq->idle_timer, expires); 1469 else { 1470 /* it's been idle for too long, wake up manager */ 1471 gcwq->flags |= GCWQ_MANAGE_WORKERS; 1472 wake_up_worker(gcwq); 1473 } 1474 } 1475 1476 spin_unlock_irq(&gcwq->lock); 1477 } 1478 1479 static bool send_mayday(struct work_struct *work) 1480 { 1481 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1482 struct workqueue_struct *wq = cwq->wq; 1483 unsigned int cpu; 1484 1485 if (!(wq->flags & WQ_RESCUER)) 1486 return false; 1487 1488 /* mayday mayday mayday */ 1489 cpu = cwq->gcwq->cpu; 1490 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1491 if (cpu == WORK_CPU_UNBOUND) 1492 cpu = 0; 1493 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) 1494 wake_up_process(wq->rescuer->task); 1495 return true; 1496 } 1497 1498 static void gcwq_mayday_timeout(unsigned long __gcwq) 1499 { 1500 struct global_cwq *gcwq = (void *)__gcwq; 1501 struct work_struct *work; 1502 1503 spin_lock_irq(&gcwq->lock); 1504 1505 if (need_to_create_worker(gcwq)) { 1506 /* 1507 * We've been trying to create a new worker but 1508 * haven't been successful. We might be hitting an 1509 * allocation deadlock. Send distress signals to 1510 * rescuers. 
1511 */ 1512 list_for_each_entry(work, &gcwq->worklist, entry) 1513 send_mayday(work); 1514 } 1515 1516 spin_unlock_irq(&gcwq->lock); 1517 1518 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); 1519 } 1520 1521 /** 1522 * maybe_create_worker - create a new worker if necessary 1523 * @gcwq: gcwq to create a new worker for 1524 * 1525 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to 1526 * have at least one idle worker on return from this function. If 1527 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 1528 * sent to all rescuers with works scheduled on @gcwq to resolve 1529 * possible allocation deadlock. 1530 * 1531 * On return, need_to_create_worker() is guaranteed to be false and 1532 * may_start_working() true. 1533 * 1534 * LOCKING: 1535 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1536 * multiple times. Does GFP_KERNEL allocations. Called only from 1537 * manager. 1538 * 1539 * RETURNS: 1540 * false if no action was taken and gcwq->lock stayed locked, true 1541 * otherwise. 1542 */ 1543 static bool maybe_create_worker(struct global_cwq *gcwq) 1544 __releases(&gcwq->lock) 1545 __acquires(&gcwq->lock) 1546 { 1547 if (!need_to_create_worker(gcwq)) 1548 return false; 1549 restart: 1550 spin_unlock_irq(&gcwq->lock); 1551 1552 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 1553 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1554 1555 while (true) { 1556 struct worker *worker; 1557 1558 worker = create_worker(gcwq, true); 1559 if (worker) { 1560 del_timer_sync(&gcwq->mayday_timer); 1561 spin_lock_irq(&gcwq->lock); 1562 start_worker(worker); 1563 BUG_ON(need_to_create_worker(gcwq)); 1564 return true; 1565 } 1566 1567 if (!need_to_create_worker(gcwq)) 1568 break; 1569 1570 __set_current_state(TASK_INTERRUPTIBLE); 1571 schedule_timeout(CREATE_COOLDOWN); 1572 1573 if (!need_to_create_worker(gcwq)) 1574 break; 1575 } 1576 1577 del_timer_sync(&gcwq->mayday_timer); 1578 spin_lock_irq(&gcwq->lock); 1579 if (need_to_create_worker(gcwq)) 1580 goto restart; 1581 return true; 1582 } 1583 1584 /** 1585 * maybe_destroy_worker - destroy workers which have been idle for a while 1586 * @gcwq: gcwq to destroy workers for 1587 * 1588 * Destroy @gcwq workers which have been idle for longer than 1589 * IDLE_WORKER_TIMEOUT. 1590 * 1591 * LOCKING: 1592 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1593 * multiple times. Called only from manager. 1594 * 1595 * RETURNS: 1596 * false if no action was taken and gcwq->lock stayed locked, true 1597 * otherwise. 1598 */ 1599 static bool maybe_destroy_workers(struct global_cwq *gcwq) 1600 { 1601 bool ret = false; 1602 1603 while (too_many_workers(gcwq)) { 1604 struct worker *worker; 1605 unsigned long expires; 1606 1607 worker = list_entry(gcwq->idle_list.prev, struct worker, entry); 1608 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 1609 1610 if (time_before(jiffies, expires)) { 1611 mod_timer(&gcwq->idle_timer, expires); 1612 break; 1613 } 1614 1615 destroy_worker(worker); 1616 ret = true; 1617 } 1618 1619 return ret; 1620 } 1621 1622 /** 1623 * manage_workers - manage worker pool 1624 * @worker: self 1625 * 1626 * Assume the manager role and manage gcwq worker pool @worker belongs 1627 * to. At any given time, there can be only zero or one manager per 1628 * gcwq. The exclusion is handled automatically by this function. 1629 * 1630 * The caller can safely start processing works on false return. 
 * On true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	bool ret = false;

	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
		return ret;

	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(gcwq);
	ret |= maybe_create_worker(gcwq);

	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

	trace_workqueue_activate_work(work);
	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 * @delayed: for a delayed work
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
1736 */ 1737 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, 1738 bool delayed) 1739 { 1740 /* ignore uncolored works */ 1741 if (color == WORK_NO_COLOR) 1742 return; 1743 1744 cwq->nr_in_flight[color]--; 1745 1746 if (!delayed) { 1747 cwq->nr_active--; 1748 if (!list_empty(&cwq->delayed_works)) { 1749 /* one down, submit a delayed one */ 1750 if (cwq->nr_active < cwq->max_active) 1751 cwq_activate_first_delayed(cwq); 1752 } 1753 } 1754 1755 /* is flush in progress and are we at the flushing tip? */ 1756 if (likely(cwq->flush_color != color)) 1757 return; 1758 1759 /* are there still in-flight works? */ 1760 if (cwq->nr_in_flight[color]) 1761 return; 1762 1763 /* this cwq is done, clear flush_color */ 1764 cwq->flush_color = -1; 1765 1766 /* 1767 * If this was the last cwq, wake up the first flusher. It 1768 * will handle the rest. 1769 */ 1770 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 1771 complete(&cwq->wq->first_flusher->done); 1772 } 1773 1774 /** 1775 * process_one_work - process single work 1776 * @worker: self 1777 * @work: work to process 1778 * 1779 * Process @work. This function contains all the logics necessary to 1780 * process a single work including synchronization against and 1781 * interaction with other workers on the same cpu, queueing and 1782 * flushing. As long as context requirement is met, any worker can 1783 * call this function to process a work. 1784 * 1785 * CONTEXT: 1786 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1787 */ 1788 static void process_one_work(struct worker *worker, struct work_struct *work) 1789 __releases(&gcwq->lock) 1790 __acquires(&gcwq->lock) 1791 { 1792 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1793 struct global_cwq *gcwq = cwq->gcwq; 1794 struct hlist_head *bwh = busy_worker_head(gcwq, work); 1795 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; 1796 work_func_t f = work->func; 1797 int work_color; 1798 struct worker *collision; 1799 #ifdef CONFIG_LOCKDEP 1800 /* 1801 * It is permissible to free the struct work_struct from 1802 * inside the function that is called from it, this we need to 1803 * take into account for lockdep too. To avoid bogus "held 1804 * lock freed" warnings as well as problems when looking into 1805 * work->lockdep_map, make a copy and use that here. 1806 */ 1807 struct lockdep_map lockdep_map = work->lockdep_map; 1808 #endif 1809 /* 1810 * A single work shouldn't be executed concurrently by 1811 * multiple workers on a single cpu. Check whether anyone is 1812 * already processing the work. If so, defer the work to the 1813 * currently executing one. 1814 */ 1815 collision = __find_worker_executing_work(gcwq, bwh, work); 1816 if (unlikely(collision)) { 1817 move_linked_works(work, &collision->scheduled, NULL); 1818 return; 1819 } 1820 1821 /* claim and process */ 1822 debug_work_deactivate(work); 1823 hlist_add_head(&worker->hentry, bwh); 1824 worker->current_work = work; 1825 worker->current_cwq = cwq; 1826 work_color = get_work_color(work); 1827 1828 /* record the current cpu number in the work data and dequeue */ 1829 set_work_cpu(work, gcwq->cpu); 1830 list_del_init(&work->entry); 1831 1832 /* 1833 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, 1834 * wake up another worker; otherwise, clear HIGHPRI_PENDING. 
1835 */ 1836 if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { 1837 struct work_struct *nwork = list_first_entry(&gcwq->worklist, 1838 struct work_struct, entry); 1839 1840 if (!list_empty(&gcwq->worklist) && 1841 get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) 1842 wake_up_worker(gcwq); 1843 else 1844 gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; 1845 } 1846 1847 /* 1848 * CPU intensive works don't participate in concurrency 1849 * management. They're the scheduler's responsibility. 1850 */ 1851 if (unlikely(cpu_intensive)) 1852 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 1853 1854 spin_unlock_irq(&gcwq->lock); 1855 1856 work_clear_pending(work); 1857 lock_map_acquire_read(&cwq->wq->lockdep_map); 1858 lock_map_acquire(&lockdep_map); 1859 trace_workqueue_execute_start(work); 1860 f(work); 1861 /* 1862 * While we must be careful to not use "work" after this, the trace 1863 * point will only record its address. 1864 */ 1865 trace_workqueue_execute_end(work); 1866 lock_map_release(&lockdep_map); 1867 lock_map_release(&cwq->wq->lockdep_map); 1868 1869 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 1870 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 1871 "%s/0x%08x/%d\n", 1872 current->comm, preempt_count(), task_pid_nr(current)); 1873 printk(KERN_ERR " last function: "); 1874 print_symbol("%s\n", (unsigned long)f); 1875 debug_show_held_locks(current); 1876 dump_stack(); 1877 } 1878 1879 spin_lock_irq(&gcwq->lock); 1880 1881 /* clear cpu intensive status */ 1882 if (unlikely(cpu_intensive)) 1883 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 1884 1885 /* we're done with it, release */ 1886 hlist_del_init(&worker->hentry); 1887 worker->current_work = NULL; 1888 worker->current_cwq = NULL; 1889 cwq_dec_nr_in_flight(cwq, work_color, false); 1890 } 1891 1892 /** 1893 * process_scheduled_works - process scheduled works 1894 * @worker: self 1895 * 1896 * Process all scheduled works. Please note that the scheduled list 1897 * may change while processing a work, so this function repeatedly 1898 * fetches a work from the top and executes it. 1899 * 1900 * CONTEXT: 1901 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1902 * multiple times. 1903 */ 1904 static void process_scheduled_works(struct worker *worker) 1905 { 1906 while (!list_empty(&worker->scheduled)) { 1907 struct work_struct *work = list_first_entry(&worker->scheduled, 1908 struct work_struct, entry); 1909 process_one_work(worker, work); 1910 } 1911 } 1912 1913 /** 1914 * worker_thread - the worker thread function 1915 * @__worker: self 1916 * 1917 * The gcwq worker thread function. There's a single dynamic pool of 1918 * these per each cpu. These workers process all works regardless of 1919 * their specific target workqueue. The only exception is works which 1920 * belong to workqueues with a rescuer which will be explained in 1921 * rescuer_thread(). 1922 */ 1923 static int worker_thread(void *__worker) 1924 { 1925 struct worker *worker = __worker; 1926 struct global_cwq *gcwq = worker->gcwq; 1927 1928 /* tell the scheduler that this is a workqueue worker */ 1929 worker->task->flags |= PF_WQ_WORKER; 1930 woke_up: 1931 spin_lock_irq(&gcwq->lock); 1932 1933 /* DIE can be set only while we're idle, checking here is enough */ 1934 if (worker->flags & WORKER_DIE) { 1935 spin_unlock_irq(&gcwq->lock); 1936 worker->task->flags &= ~PF_WQ_WORKER; 1937 return 0; 1938 } 1939 1940 worker_leave_idle(worker); 1941 recheck: 1942 /* no more worker necessary? 
*/ 1943 if (!need_more_worker(gcwq)) 1944 goto sleep; 1945 1946 /* do we need to manage? */ 1947 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) 1948 goto recheck; 1949 1950 /* 1951 * ->scheduled list can only be filled while a worker is 1952 * preparing to process a work or actually processing it. 1953 * Make sure nobody diddled with it while I was sleeping. 1954 */ 1955 BUG_ON(!list_empty(&worker->scheduled)); 1956 1957 /* 1958 * When control reaches this point, we're guaranteed to have 1959 * at least one idle worker or that someone else has already 1960 * assumed the manager role. 1961 */ 1962 worker_clr_flags(worker, WORKER_PREP); 1963 1964 do { 1965 struct work_struct *work = 1966 list_first_entry(&gcwq->worklist, 1967 struct work_struct, entry); 1968 1969 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 1970 /* optimization path, not strictly necessary */ 1971 process_one_work(worker, work); 1972 if (unlikely(!list_empty(&worker->scheduled))) 1973 process_scheduled_works(worker); 1974 } else { 1975 move_linked_works(work, &worker->scheduled, NULL); 1976 process_scheduled_works(worker); 1977 } 1978 } while (keep_working(gcwq)); 1979 1980 worker_set_flags(worker, WORKER_PREP, false); 1981 sleep: 1982 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) 1983 goto recheck; 1984 1985 /* 1986 * gcwq->lock is held and there's no work to process and no 1987 * need to manage, sleep. Workers are woken up only while 1988 * holding gcwq->lock or from local cpu, so setting the 1989 * current state before releasing gcwq->lock is enough to 1990 * prevent losing any event. 1991 */ 1992 worker_enter_idle(worker); 1993 __set_current_state(TASK_INTERRUPTIBLE); 1994 spin_unlock_irq(&gcwq->lock); 1995 schedule(); 1996 goto woke_up; 1997 } 1998 1999 /** 2000 * rescuer_thread - the rescuer thread function 2001 * @__wq: the associated workqueue 2002 * 2003 * Workqueue rescuer thread function. There's one rescuer for each 2004 * workqueue which has WQ_RESCUER set. 2005 * 2006 * Regular work processing on a gcwq may block trying to create a new 2007 * worker, which uses a GFP_KERNEL allocation and thus has a slight 2008 * chance of developing into a deadlock if some works currently on the 2009 * same queue need to be processed to satisfy the GFP_KERNEL allocation. 2010 * This is the problem the rescuer solves. 2011 * 2012 * When such a condition is possible, the gcwq summons rescuers of all 2013 * workqueues which have works queued on the gcwq and lets them process 2014 * those works so that forward progress can be guaranteed. 2015 * 2016 * This should happen rarely. 2017 */ 2018 static int rescuer_thread(void *__wq) 2019 { 2020 struct workqueue_struct *wq = __wq; 2021 struct worker *rescuer = wq->rescuer; 2022 struct list_head *scheduled = &rescuer->scheduled; 2023 bool is_unbound = wq->flags & WQ_UNBOUND; 2024 unsigned int cpu; 2025 2026 set_user_nice(current, RESCUER_NICE_LEVEL); 2027 repeat: 2028 set_current_state(TASK_INTERRUPTIBLE); 2029 2030 if (kthread_should_stop()) 2031 return 0; 2032 2033 /* 2034 * See whether any cpu is asking for help. Unbound 2035 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. 2036 */ 2037 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2038 unsigned int tcpu = is_unbound ? 
WORK_CPU_UNBOUND : cpu; 2039 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 2040 struct global_cwq *gcwq = cwq->gcwq; 2041 struct work_struct *work, *n; 2042 2043 __set_current_state(TASK_RUNNING); 2044 mayday_clear_cpu(cpu, wq->mayday_mask); 2045 2046 /* migrate to the target cpu if possible */ 2047 rescuer->gcwq = gcwq; 2048 worker_maybe_bind_and_lock(rescuer); 2049 2050 /* 2051 * Slurp in all works issued via this workqueue and 2052 * process'em. 2053 */ 2054 BUG_ON(!list_empty(&rescuer->scheduled)); 2055 list_for_each_entry_safe(work, n, &gcwq->worklist, entry) 2056 if (get_work_cwq(work) == cwq) 2057 move_linked_works(work, scheduled, &n); 2058 2059 process_scheduled_works(rescuer); 2060 2061 /* 2062 * Leave this gcwq. If keep_working() is %true, notify a 2063 * regular worker; otherwise, we end up with 0 concurrency 2064 * and stalling the execution. 2065 */ 2066 if (keep_working(gcwq)) 2067 wake_up_worker(gcwq); 2068 2069 spin_unlock_irq(&gcwq->lock); 2070 } 2071 2072 schedule(); 2073 goto repeat; 2074 } 2075 2076 struct wq_barrier { 2077 struct work_struct work; 2078 struct completion done; 2079 }; 2080 2081 static void wq_barrier_func(struct work_struct *work) 2082 { 2083 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2084 complete(&barr->done); 2085 } 2086 2087 /** 2088 * insert_wq_barrier - insert a barrier work 2089 * @cwq: cwq to insert barrier into 2090 * @barr: wq_barrier to insert 2091 * @target: target work to attach @barr to 2092 * @worker: worker currently executing @target, NULL if @target is not executing 2093 * 2094 * @barr is linked to @target such that @barr is completed only after 2095 * @target finishes execution. Please note that the ordering 2096 * guarantee is observed only with respect to @target and on the local 2097 * cpu. 2098 * 2099 * Currently, a queued barrier can't be canceled. This is because 2100 * try_to_grab_pending() can't determine whether the work to be 2101 * grabbed is at the head of the queue and thus can't clear LINKED 2102 * flag of the previous work while there must be a valid next work 2103 * after a work with LINKED flag set. 2104 * 2105 * Note that when @worker is non-NULL, @target may be modified 2106 * underneath us, so we can't reliably determine cwq from @target. 2107 * 2108 * CONTEXT: 2109 * spin_lock_irq(gcwq->lock). 2110 */ 2111 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2112 struct wq_barrier *barr, 2113 struct work_struct *target, struct worker *worker) 2114 { 2115 struct list_head *head; 2116 unsigned int linked = 0; 2117 2118 /* 2119 * debugobject calls are safe here even with gcwq->lock locked 2120 * as we know for sure that this will not trigger any of the 2121 * checks and call back into the fixup functions where we 2122 * might deadlock. 2123 */ 2124 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2125 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2126 init_completion(&barr->done); 2127 2128 /* 2129 * If @target is currently being executed, schedule the 2130 * barrier to the worker; otherwise, put it after @target. 
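 * In the latter case, the LINKED bit is also set on @target below so
 * that the barrier is carried along whenever @target itself is moved
 * with move_linked_works().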
2131 */ 2132 if (worker) 2133 head = worker->scheduled.next; 2134 else { 2135 unsigned long *bits = work_data_bits(target); 2136 2137 head = target->entry.next; 2138 /* there can already be other linked works, inherit and set */ 2139 linked = *bits & WORK_STRUCT_LINKED; 2140 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2141 } 2142 2143 debug_work_activate(&barr->work); 2144 insert_work(cwq, &barr->work, head, 2145 work_color_to_flags(WORK_NO_COLOR) | linked); 2146 } 2147 2148 /** 2149 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing 2150 * @wq: workqueue being flushed 2151 * @flush_color: new flush color, < 0 for no-op 2152 * @work_color: new work color, < 0 for no-op 2153 * 2154 * Prepare cwqs for workqueue flushing. 2155 * 2156 * If @flush_color is non-negative, flush_color on all cwqs should be 2157 * -1. If no cwq has in-flight commands at the specified color, all 2158 * cwq->flush_color's stay at -1 and %false is returned. If any cwq 2159 * has in flight commands, its cwq->flush_color is set to 2160 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq 2161 * wakeup logic is armed and %true is returned. 2162 * 2163 * The caller should have initialized @wq->first_flusher prior to 2164 * calling this function with non-negative @flush_color. If 2165 * @flush_color is negative, no flush color update is done and %false 2166 * is returned. 2167 * 2168 * If @work_color is non-negative, all cwqs should have the same 2169 * work_color which is previous to @work_color and all will be 2170 * advanced to @work_color. 2171 * 2172 * CONTEXT: 2173 * mutex_lock(wq->flush_mutex). 2174 * 2175 * RETURNS: 2176 * %true if @flush_color >= 0 and there's something to flush. %false 2177 * otherwise. 2178 */ 2179 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, 2180 int flush_color, int work_color) 2181 { 2182 bool wait = false; 2183 unsigned int cpu; 2184 2185 if (flush_color >= 0) { 2186 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); 2187 atomic_set(&wq->nr_cwqs_to_flush, 1); 2188 } 2189 2190 for_each_cwq_cpu(cpu, wq) { 2191 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2192 struct global_cwq *gcwq = cwq->gcwq; 2193 2194 spin_lock_irq(&gcwq->lock); 2195 2196 if (flush_color >= 0) { 2197 BUG_ON(cwq->flush_color != -1); 2198 2199 if (cwq->nr_in_flight[flush_color]) { 2200 cwq->flush_color = flush_color; 2201 atomic_inc(&wq->nr_cwqs_to_flush); 2202 wait = true; 2203 } 2204 } 2205 2206 if (work_color >= 0) { 2207 BUG_ON(work_color != work_next_color(cwq->work_color)); 2208 cwq->work_color = work_color; 2209 } 2210 2211 spin_unlock_irq(&gcwq->lock); 2212 } 2213 2214 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 2215 complete(&wq->first_flusher->done); 2216 2217 return wait; 2218 } 2219 2220 /** 2221 * flush_workqueue - ensure that any scheduled work has run to completion. 2222 * @wq: workqueue to flush 2223 * 2224 * Forces execution of the workqueue and blocks until its completion. 2225 * This is typically used in driver shutdown handlers. 2226 * 2227 * We sleep until all works which were queued on entry have been handled, 2228 * but we are not livelocked by new incoming ones. 
2229 */ 2230 void flush_workqueue(struct workqueue_struct *wq) 2231 { 2232 struct wq_flusher this_flusher = { 2233 .list = LIST_HEAD_INIT(this_flusher.list), 2234 .flush_color = -1, 2235 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2236 }; 2237 int next_color; 2238 2239 lock_map_acquire(&wq->lockdep_map); 2240 lock_map_release(&wq->lockdep_map); 2241 2242 mutex_lock(&wq->flush_mutex); 2243 2244 /* 2245 * Start-to-wait phase 2246 */ 2247 next_color = work_next_color(wq->work_color); 2248 2249 if (next_color != wq->flush_color) { 2250 /* 2251 * Color space is not full. The current work_color 2252 * becomes our flush_color and work_color is advanced 2253 * by one. 2254 */ 2255 BUG_ON(!list_empty(&wq->flusher_overflow)); 2256 this_flusher.flush_color = wq->work_color; 2257 wq->work_color = next_color; 2258 2259 if (!wq->first_flusher) { 2260 /* no flush in progress, become the first flusher */ 2261 BUG_ON(wq->flush_color != this_flusher.flush_color); 2262 2263 wq->first_flusher = &this_flusher; 2264 2265 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, 2266 wq->work_color)) { 2267 /* nothing to flush, done */ 2268 wq->flush_color = next_color; 2269 wq->first_flusher = NULL; 2270 goto out_unlock; 2271 } 2272 } else { 2273 /* wait in queue */ 2274 BUG_ON(wq->flush_color == this_flusher.flush_color); 2275 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2276 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2277 } 2278 } else { 2279 /* 2280 * Oops, color space is full, wait on overflow queue. 2281 * The next flush completion will assign us 2282 * flush_color and transfer to flusher_queue. 2283 */ 2284 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2285 } 2286 2287 mutex_unlock(&wq->flush_mutex); 2288 2289 wait_for_completion(&this_flusher.done); 2290 2291 /* 2292 * Wake-up-and-cascade phase 2293 * 2294 * First flushers are responsible for cascading flushes and 2295 * handling overflow. Non-first flushers can simply return. 2296 */ 2297 if (wq->first_flusher != &this_flusher) 2298 return; 2299 2300 mutex_lock(&wq->flush_mutex); 2301 2302 /* we might have raced, check again with mutex held */ 2303 if (wq->first_flusher != &this_flusher) 2304 goto out_unlock; 2305 2306 wq->first_flusher = NULL; 2307 2308 BUG_ON(!list_empty(&this_flusher.list)); 2309 BUG_ON(wq->flush_color != this_flusher.flush_color); 2310 2311 while (true) { 2312 struct wq_flusher *next, *tmp; 2313 2314 /* complete all the flushers sharing the current flush color */ 2315 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 2316 if (next->flush_color != wq->flush_color) 2317 break; 2318 list_del_init(&next->list); 2319 complete(&next->done); 2320 } 2321 2322 BUG_ON(!list_empty(&wq->flusher_overflow) && 2323 wq->flush_color != work_next_color(wq->work_color)); 2324 2325 /* this flush_color is finished, advance by one */ 2326 wq->flush_color = work_next_color(wq->flush_color); 2327 2328 /* one color has been freed, handle overflow queue */ 2329 if (!list_empty(&wq->flusher_overflow)) { 2330 /* 2331 * Assign the same color to all overflowed 2332 * flushers, advance work_color and append to 2333 * flusher_queue. This is the start-to-wait 2334 * phase for these overflowed flushers. 
2335 */ 2336 list_for_each_entry(tmp, &wq->flusher_overflow, list) 2337 tmp->flush_color = wq->work_color; 2338 2339 wq->work_color = work_next_color(wq->work_color); 2340 2341 list_splice_tail_init(&wq->flusher_overflow, 2342 &wq->flusher_queue); 2343 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2344 } 2345 2346 if (list_empty(&wq->flusher_queue)) { 2347 BUG_ON(wq->flush_color != wq->work_color); 2348 break; 2349 } 2350 2351 /* 2352 * Need to flush more colors. Make the next flusher 2353 * the new first flusher and arm cwqs. 2354 */ 2355 BUG_ON(wq->flush_color == wq->work_color); 2356 BUG_ON(wq->flush_color != next->flush_color); 2357 2358 list_del_init(&next->list); 2359 wq->first_flusher = next; 2360 2361 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) 2362 break; 2363 2364 /* 2365 * Meh... this color is already done, clear first 2366 * flusher and repeat cascading. 2367 */ 2368 wq->first_flusher = NULL; 2369 } 2370 2371 out_unlock: 2372 mutex_unlock(&wq->flush_mutex); 2373 } 2374 EXPORT_SYMBOL_GPL(flush_workqueue); 2375 2376 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 2377 bool wait_executing) 2378 { 2379 struct worker *worker = NULL; 2380 struct global_cwq *gcwq; 2381 struct cpu_workqueue_struct *cwq; 2382 2383 might_sleep(); 2384 gcwq = get_work_gcwq(work); 2385 if (!gcwq) 2386 return false; 2387 2388 spin_lock_irq(&gcwq->lock); 2389 if (!list_empty(&work->entry)) { 2390 /* 2391 * See the comment near try_to_grab_pending()->smp_rmb(). 2392 * If it was re-queued to a different gcwq under us, we 2393 * are not going to wait. 2394 */ 2395 smp_rmb(); 2396 cwq = get_work_cwq(work); 2397 if (unlikely(!cwq || gcwq != cwq->gcwq)) 2398 goto already_gone; 2399 } else if (wait_executing) { 2400 worker = find_worker_executing_work(gcwq, work); 2401 if (!worker) 2402 goto already_gone; 2403 cwq = worker->current_cwq; 2404 } else 2405 goto already_gone; 2406 2407 insert_wq_barrier(cwq, barr, work, worker); 2408 spin_unlock_irq(&gcwq->lock); 2409 2410 /* 2411 * If @max_active is 1 or rescuer is in use, flushing another work 2412 * item on the same workqueue may lead to deadlock. Make sure the 2413 * flusher is not running on the same workqueue by verifying write 2414 * access. 2415 */ 2416 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) 2417 lock_map_acquire(&cwq->wq->lockdep_map); 2418 else 2419 lock_map_acquire_read(&cwq->wq->lockdep_map); 2420 lock_map_release(&cwq->wq->lockdep_map); 2421 2422 return true; 2423 already_gone: 2424 spin_unlock_irq(&gcwq->lock); 2425 return false; 2426 } 2427 2428 /** 2429 * flush_work - wait for a work to finish executing the last queueing instance 2430 * @work: the work to flush 2431 * 2432 * Wait until @work has finished execution. This function considers 2433 * only the last queueing instance of @work. If @work has been 2434 * enqueued across different CPUs on a non-reentrant workqueue or on 2435 * multiple workqueues, @work might still be executing on return on 2436 * some of the CPUs from earlier queueing. 2437 * 2438 * If @work was queued only on a non-reentrant, ordered or unbound 2439 * workqueue, @work is guaranteed to be idle on return if it hasn't 2440 * been requeued since flush started. 2441 * 2442 * RETURNS: 2443 * %true if flush_work() waited for the work to finish execution, 2444 * %false if it was already idle. 
2445 */ 2446 bool flush_work(struct work_struct *work) 2447 { 2448 struct wq_barrier barr; 2449 2450 if (start_flush_work(work, &barr, true)) { 2451 wait_for_completion(&barr.done); 2452 destroy_work_on_stack(&barr.work); 2453 return true; 2454 } else 2455 return false; 2456 } 2457 EXPORT_SYMBOL_GPL(flush_work); 2458 2459 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) 2460 { 2461 struct wq_barrier barr; 2462 struct worker *worker; 2463 2464 spin_lock_irq(&gcwq->lock); 2465 2466 worker = find_worker_executing_work(gcwq, work); 2467 if (unlikely(worker)) 2468 insert_wq_barrier(worker->current_cwq, &barr, work, worker); 2469 2470 spin_unlock_irq(&gcwq->lock); 2471 2472 if (unlikely(worker)) { 2473 wait_for_completion(&barr.done); 2474 destroy_work_on_stack(&barr.work); 2475 return true; 2476 } else 2477 return false; 2478 } 2479 2480 static bool wait_on_work(struct work_struct *work) 2481 { 2482 bool ret = false; 2483 int cpu; 2484 2485 might_sleep(); 2486 2487 lock_map_acquire(&work->lockdep_map); 2488 lock_map_release(&work->lockdep_map); 2489 2490 for_each_gcwq_cpu(cpu) 2491 ret |= wait_on_cpu_work(get_gcwq(cpu), work); 2492 return ret; 2493 } 2494 2495 /** 2496 * flush_work_sync - wait until a work has finished execution 2497 * @work: the work to flush 2498 * 2499 * Wait until @work has finished execution. On return, it's 2500 * guaranteed that all queueing instances of @work which happened 2501 * before this function is called are finished. In other words, if 2502 * @work hasn't been requeued since this function was called, @work is 2503 * guaranteed to be idle on return. 2504 * 2505 * RETURNS: 2506 * %true if flush_work_sync() waited for the work to finish execution, 2507 * %false if it was already idle. 2508 */ 2509 bool flush_work_sync(struct work_struct *work) 2510 { 2511 struct wq_barrier barr; 2512 bool pending, waited; 2513 2514 /* we'll wait for executions separately, queue barr only if pending */ 2515 pending = start_flush_work(work, &barr, false); 2516 2517 /* wait for executions to finish */ 2518 waited = wait_on_work(work); 2519 2520 /* wait for the pending one */ 2521 if (pending) { 2522 wait_for_completion(&barr.done); 2523 destroy_work_on_stack(&barr.work); 2524 } 2525 2526 return pending || waited; 2527 } 2528 EXPORT_SYMBOL_GPL(flush_work_sync); 2529 2530 /* 2531 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 2532 * so this work can't be re-armed in any way. 2533 */ 2534 static int try_to_grab_pending(struct work_struct *work) 2535 { 2536 struct global_cwq *gcwq; 2537 int ret = -1; 2538 2539 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2540 return 0; 2541 2542 /* 2543 * The queueing is in progress, or it is already queued. Try to 2544 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2545 */ 2546 gcwq = get_work_gcwq(work); 2547 if (!gcwq) 2548 return ret; 2549 2550 spin_lock_irq(&gcwq->lock); 2551 if (!list_empty(&work->entry)) { 2552 /* 2553 * This work is queued, but perhaps we locked the wrong gcwq. 2554 * In that case we must see the new value after rmb(), see 2555 * insert_work()->wmb(). 
2556 */ 2557 smp_rmb(); 2558 if (gcwq == get_work_gcwq(work)) { 2559 debug_work_deactivate(work); 2560 list_del_init(&work->entry); 2561 cwq_dec_nr_in_flight(get_work_cwq(work), 2562 get_work_color(work), 2563 *work_data_bits(work) & WORK_STRUCT_DELAYED); 2564 ret = 1; 2565 } 2566 } 2567 spin_unlock_irq(&gcwq->lock); 2568 2569 return ret; 2570 } 2571 2572 static bool __cancel_work_timer(struct work_struct *work, 2573 struct timer_list* timer) 2574 { 2575 int ret; 2576 2577 do { 2578 ret = (timer && likely(del_timer(timer))); 2579 if (!ret) 2580 ret = try_to_grab_pending(work); 2581 wait_on_work(work); 2582 } while (unlikely(ret < 0)); 2583 2584 clear_work_data(work); 2585 return ret; 2586 } 2587 2588 /** 2589 * cancel_work_sync - cancel a work and wait for it to finish 2590 * @work: the work to cancel 2591 * 2592 * Cancel @work and wait for its execution to finish. This function 2593 * can be used even if the work re-queues itself or migrates to 2594 * another workqueue. On return from this function, @work is 2595 * guaranteed to be not pending or executing on any CPU. 2596 * 2597 * cancel_work_sync(&delayed_work->work) must not be used for 2598 * delayed_work's. Use cancel_delayed_work_sync() instead. 2599 * 2600 * The caller must ensure that the workqueue on which @work was last 2601 * queued can't be destroyed before this function returns. 2602 * 2603 * RETURNS: 2604 * %true if @work was pending, %false otherwise. 2605 */ 2606 bool cancel_work_sync(struct work_struct *work) 2607 { 2608 return __cancel_work_timer(work, NULL); 2609 } 2610 EXPORT_SYMBOL_GPL(cancel_work_sync); 2611 2612 /** 2613 * flush_delayed_work - wait for a dwork to finish executing the last queueing 2614 * @dwork: the delayed work to flush 2615 * 2616 * Delayed timer is cancelled and the pending work is queued for 2617 * immediate execution. Like flush_work(), this function only 2618 * considers the last queueing instance of @dwork. 2619 * 2620 * RETURNS: 2621 * %true if flush_work() waited for the work to finish execution, 2622 * %false if it was already idle. 2623 */ 2624 bool flush_delayed_work(struct delayed_work *dwork) 2625 { 2626 if (del_timer_sync(&dwork->timer)) 2627 __queue_work(raw_smp_processor_id(), 2628 get_work_cwq(&dwork->work)->wq, &dwork->work); 2629 return flush_work(&dwork->work); 2630 } 2631 EXPORT_SYMBOL(flush_delayed_work); 2632 2633 /** 2634 * flush_delayed_work_sync - wait for a dwork to finish 2635 * @dwork: the delayed work to flush 2636 * 2637 * Delayed timer is cancelled and the pending work is queued for 2638 * execution immediately. Other than timer handling, its behavior 2639 * is identical to flush_work_sync(). 2640 * 2641 * RETURNS: 2642 * %true if flush_work_sync() waited for the work to finish execution, 2643 * %false if it was already idle. 2644 */ 2645 bool flush_delayed_work_sync(struct delayed_work *dwork) 2646 { 2647 if (del_timer_sync(&dwork->timer)) 2648 __queue_work(raw_smp_processor_id(), 2649 get_work_cwq(&dwork->work)->wq, &dwork->work); 2650 return flush_work_sync(&dwork->work); 2651 } 2652 EXPORT_SYMBOL(flush_delayed_work_sync); 2653 2654 /** 2655 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2656 * @dwork: the delayed work cancel 2657 * 2658 * This is cancel_work_sync() for delayed works. 2659 * 2660 * RETURNS: 2661 * %true if @dwork was pending, %false otherwise. 
2662 */ 2663 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2664 { 2665 return __cancel_work_timer(&dwork->work, &dwork->timer); 2666 } 2667 EXPORT_SYMBOL(cancel_delayed_work_sync); 2668 2669 /** 2670 * schedule_work - put work task in global workqueue 2671 * @work: job to be done 2672 * 2673 * Returns zero if @work was already on the kernel-global workqueue and 2674 * non-zero otherwise. 2675 * 2676 * This puts a job in the kernel-global workqueue if it was not already 2677 * queued and leaves it in the same position on the kernel-global 2678 * workqueue otherwise. 2679 */ 2680 int schedule_work(struct work_struct *work) 2681 { 2682 return queue_work(system_wq, work); 2683 } 2684 EXPORT_SYMBOL(schedule_work); 2685 2686 /* 2687 * schedule_work_on - put work task on a specific cpu 2688 * @cpu: cpu to put the work task on 2689 * @work: job to be done 2690 * 2691 * This puts a job on a specific cpu 2692 */ 2693 int schedule_work_on(int cpu, struct work_struct *work) 2694 { 2695 return queue_work_on(cpu, system_wq, work); 2696 } 2697 EXPORT_SYMBOL(schedule_work_on); 2698 2699 /** 2700 * schedule_delayed_work - put work task in global workqueue after delay 2701 * @dwork: job to be done 2702 * @delay: number of jiffies to wait or 0 for immediate execution 2703 * 2704 * After waiting for a given time this puts a job in the kernel-global 2705 * workqueue. 2706 */ 2707 int schedule_delayed_work(struct delayed_work *dwork, 2708 unsigned long delay) 2709 { 2710 return queue_delayed_work(system_wq, dwork, delay); 2711 } 2712 EXPORT_SYMBOL(schedule_delayed_work); 2713 2714 /** 2715 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 2716 * @cpu: cpu to use 2717 * @dwork: job to be done 2718 * @delay: number of jiffies to wait 2719 * 2720 * After waiting for a given time this puts a job in the kernel-global 2721 * workqueue on the specified CPU. 2722 */ 2723 int schedule_delayed_work_on(int cpu, 2724 struct delayed_work *dwork, unsigned long delay) 2725 { 2726 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 2727 } 2728 EXPORT_SYMBOL(schedule_delayed_work_on); 2729 2730 /** 2731 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2732 * @func: the function to call 2733 * 2734 * schedule_on_each_cpu() executes @func on each online CPU using the 2735 * system workqueue and blocks until all CPUs have completed. 2736 * schedule_on_each_cpu() is very slow. 2737 * 2738 * RETURNS: 2739 * 0 on success, -errno on failure. 2740 */ 2741 int schedule_on_each_cpu(work_func_t func) 2742 { 2743 int cpu; 2744 struct work_struct __percpu *works; 2745 2746 works = alloc_percpu(struct work_struct); 2747 if (!works) 2748 return -ENOMEM; 2749 2750 get_online_cpus(); 2751 2752 for_each_online_cpu(cpu) { 2753 struct work_struct *work = per_cpu_ptr(works, cpu); 2754 2755 INIT_WORK(work, func); 2756 schedule_work_on(cpu, work); 2757 } 2758 2759 for_each_online_cpu(cpu) 2760 flush_work(per_cpu_ptr(works, cpu)); 2761 2762 put_online_cpus(); 2763 free_percpu(works); 2764 return 0; 2765 } 2766 2767 /** 2768 * flush_scheduled_work - ensure that any scheduled work has run to completion. 2769 * 2770 * Forces execution of the kernel-global workqueue and blocks until its 2771 * completion. 2772 * 2773 * Think twice before calling this function! It's very easy to get into 2774 * trouble if you don't take great care. 
Either of the following situations 2775 * will lead to deadlock: 2776 * 2777 * One of the work items currently on the workqueue needs to acquire 2778 * a lock held by your code or its caller. 2779 * 2780 * Your code is running in the context of a work routine. 2781 * 2782 * They will be detected by lockdep when they occur, but the first might not 2783 * occur very often. It depends on what work items are on the workqueue and 2784 * what locks they need, which you have no control over. 2785 * 2786 * In most situations flushing the entire workqueue is overkill; you merely 2787 * need to know that a particular work item isn't queued and isn't running. 2788 * In such cases you should use cancel_delayed_work_sync() or 2789 * cancel_work_sync() instead. 2790 */ 2791 void flush_scheduled_work(void) 2792 { 2793 flush_workqueue(system_wq); 2794 } 2795 EXPORT_SYMBOL(flush_scheduled_work); 2796 2797 /** 2798 * execute_in_process_context - reliably execute the routine with user context 2799 * @fn: the function to execute 2800 * @ew: guaranteed storage for the execute work structure (must 2801 * be available when the work executes) 2802 * 2803 * Executes the function immediately if process context is available, 2804 * otherwise schedules the function for delayed execution. 2805 * 2806 * Returns: 0 - function was executed 2807 * 1 - function was scheduled for execution 2808 */ 2809 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 2810 { 2811 if (!in_interrupt()) { 2812 fn(&ew->work); 2813 return 0; 2814 } 2815 2816 INIT_WORK(&ew->work, fn); 2817 schedule_work(&ew->work); 2818 2819 return 1; 2820 } 2821 EXPORT_SYMBOL_GPL(execute_in_process_context); 2822 2823 int keventd_up(void) 2824 { 2825 return system_wq != NULL; 2826 } 2827 2828 static int alloc_cwqs(struct workqueue_struct *wq) 2829 { 2830 /* 2831 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 2832 * Make sure that the alignment isn't lower than that of 2833 * unsigned long long. 2834 */ 2835 const size_t size = sizeof(struct cpu_workqueue_struct); 2836 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 2837 __alignof__(unsigned long long)); 2838 #ifdef CONFIG_SMP 2839 bool percpu = !(wq->flags & WQ_UNBOUND); 2840 #else 2841 bool percpu = false; 2842 #endif 2843 2844 if (percpu) 2845 wq->cpu_wq.pcpu = __alloc_percpu(size, align); 2846 else { 2847 void *ptr; 2848 2849 /* 2850 * Allocate enough room to align cwq and put an extra 2851 * pointer at the end pointing back to the originally 2852 * allocated pointer which will be used for free. 2853 */ 2854 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 2855 if (ptr) { 2856 wq->cpu_wq.single = PTR_ALIGN(ptr, align); 2857 *(void **)(wq->cpu_wq.single + 1) = ptr; 2858 } 2859 } 2860 2861 /* just in case, make sure it's actually aligned 2862 * - this is affected by PERCPU() alignment in vmlinux.lds.S 2863 */ 2864 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); 2865 return wq->cpu_wq.v ? 0 : -ENOMEM; 2866 } 2867 2868 static void free_cwqs(struct workqueue_struct *wq) 2869 { 2870 #ifdef CONFIG_SMP 2871 bool percpu = !(wq->flags & WQ_UNBOUND); 2872 #else 2873 bool percpu = false; 2874 #endif 2875 2876 if (percpu) 2877 free_percpu(wq->cpu_wq.pcpu); 2878 else if (wq->cpu_wq.single) { 2879 /* the pointer to free is stored right after the cwq */ 2880 kfree(*(void **)(wq->cpu_wq.single + 1)); 2881 } 2882 } 2883 2884 static int wq_clamp_max_active(int max_active, unsigned int flags, 2885 const char *name) 2886 { 2887 int lim = flags & WQ_UNBOUND ? 
WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 2888 2889 if (max_active < 1 || max_active > lim) 2890 printk(KERN_WARNING "workqueue: max_active %d requested for %s " 2891 "is out of range, clamping between %d and %d\n", 2892 max_active, name, 1, lim); 2893 2894 return clamp_val(max_active, 1, lim); 2895 } 2896 2897 struct workqueue_struct *__alloc_workqueue_key(const char *name, 2898 unsigned int flags, 2899 int max_active, 2900 struct lock_class_key *key, 2901 const char *lock_name) 2902 { 2903 struct workqueue_struct *wq; 2904 unsigned int cpu; 2905 2906 /* 2907 * Workqueues which may be used during memory reclaim should 2908 * have a rescuer to guarantee forward progress. 2909 */ 2910 if (flags & WQ_MEM_RECLAIM) 2911 flags |= WQ_RESCUER; 2912 2913 /* 2914 * Unbound workqueues aren't concurrency managed and should be 2915 * dispatched to workers immediately. 2916 */ 2917 if (flags & WQ_UNBOUND) 2918 flags |= WQ_HIGHPRI; 2919 2920 max_active = max_active ?: WQ_DFL_ACTIVE; 2921 max_active = wq_clamp_max_active(max_active, flags, name); 2922 2923 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 2924 if (!wq) 2925 goto err; 2926 2927 wq->flags = flags; 2928 wq->saved_max_active = max_active; 2929 mutex_init(&wq->flush_mutex); 2930 atomic_set(&wq->nr_cwqs_to_flush, 0); 2931 INIT_LIST_HEAD(&wq->flusher_queue); 2932 INIT_LIST_HEAD(&wq->flusher_overflow); 2933 2934 wq->name = name; 2935 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 2936 INIT_LIST_HEAD(&wq->list); 2937 2938 if (alloc_cwqs(wq) < 0) 2939 goto err; 2940 2941 for_each_cwq_cpu(cpu, wq) { 2942 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2943 struct global_cwq *gcwq = get_gcwq(cpu); 2944 2945 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 2946 cwq->gcwq = gcwq; 2947 cwq->wq = wq; 2948 cwq->flush_color = -1; 2949 cwq->max_active = max_active; 2950 INIT_LIST_HEAD(&cwq->delayed_works); 2951 } 2952 2953 if (flags & WQ_RESCUER) { 2954 struct worker *rescuer; 2955 2956 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) 2957 goto err; 2958 2959 wq->rescuer = rescuer = alloc_worker(); 2960 if (!rescuer) 2961 goto err; 2962 2963 rescuer->task = kthread_create(rescuer_thread, wq, "%s", name); 2964 if (IS_ERR(rescuer->task)) 2965 goto err; 2966 2967 rescuer->task->flags |= PF_THREAD_BOUND; 2968 wake_up_process(rescuer->task); 2969 } 2970 2971 /* 2972 * workqueue_lock protects global freeze state and workqueues 2973 * list. Grab it, set max_active accordingly and add the new 2974 * workqueue to workqueues list. 2975 */ 2976 spin_lock(&workqueue_lock); 2977 2978 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 2979 for_each_cwq_cpu(cpu, wq) 2980 get_cwq(cpu, wq)->max_active = 0; 2981 2982 list_add(&wq->list, &workqueues); 2983 2984 spin_unlock(&workqueue_lock); 2985 2986 return wq; 2987 err: 2988 if (wq) { 2989 free_cwqs(wq); 2990 free_mayday_mask(wq->mayday_mask); 2991 kfree(wq->rescuer); 2992 kfree(wq); 2993 } 2994 return NULL; 2995 } 2996 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 2997 2998 /** 2999 * destroy_workqueue - safely terminate a workqueue 3000 * @wq: target workqueue 3001 * 3002 * Safely destroy a workqueue. All work currently pending will be done first. 3003 */ 3004 void destroy_workqueue(struct workqueue_struct *wq) 3005 { 3006 unsigned int flush_cnt = 0; 3007 unsigned int cpu; 3008 3009 /* 3010 * Mark @wq dying and drain all pending works. Once WQ_DYING is 3011 * set, only chain queueing is allowed. IOW, only currently 3012 * pending or running work items on @wq can queue further work 3013 * items on it. 
@wq is flushed repeatedly until it becomes empty. 3014 * The number of flushes is determined by the depth of chaining and 3015 * should be relatively short. Whine if it takes too long. 3016 */ 3017 wq->flags |= WQ_DYING; 3018 reflush: 3019 flush_workqueue(wq); 3020 3021 for_each_cwq_cpu(cpu, wq) { 3022 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3023 3024 if (!cwq->nr_active && list_empty(&cwq->delayed_works)) 3025 continue; 3026 3027 if (++flush_cnt == 10 || 3028 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3029 printk(KERN_WARNING "workqueue %s: flush on " 3030 "destruction isn't complete after %u tries\n", 3031 wq->name, flush_cnt); 3032 goto reflush; 3033 } 3034 3035 /* 3036 * wq list is used to freeze wq, remove from list after 3037 * flushing is complete in case freeze races us. 3038 */ 3039 spin_lock(&workqueue_lock); 3040 list_del(&wq->list); 3041 spin_unlock(&workqueue_lock); 3042 3043 /* sanity check */ 3044 for_each_cwq_cpu(cpu, wq) { 3045 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3046 int i; 3047 3048 for (i = 0; i < WORK_NR_COLORS; i++) 3049 BUG_ON(cwq->nr_in_flight[i]); 3050 BUG_ON(cwq->nr_active); 3051 BUG_ON(!list_empty(&cwq->delayed_works)); 3052 } 3053 3054 if (wq->flags & WQ_RESCUER) { 3055 kthread_stop(wq->rescuer->task); 3056 free_mayday_mask(wq->mayday_mask); 3057 kfree(wq->rescuer); 3058 } 3059 3060 free_cwqs(wq); 3061 kfree(wq); 3062 } 3063 EXPORT_SYMBOL_GPL(destroy_workqueue); 3064 3065 /** 3066 * workqueue_set_max_active - adjust max_active of a workqueue 3067 * @wq: target workqueue 3068 * @max_active: new max_active value. 3069 * 3070 * Set max_active of @wq to @max_active. 3071 * 3072 * CONTEXT: 3073 * Don't call from IRQ context. 3074 */ 3075 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3076 { 3077 unsigned int cpu; 3078 3079 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3080 3081 spin_lock(&workqueue_lock); 3082 3083 wq->saved_max_active = max_active; 3084 3085 for_each_cwq_cpu(cpu, wq) { 3086 struct global_cwq *gcwq = get_gcwq(cpu); 3087 3088 spin_lock_irq(&gcwq->lock); 3089 3090 if (!(wq->flags & WQ_FREEZABLE) || 3091 !(gcwq->flags & GCWQ_FREEZING)) 3092 get_cwq(gcwq->cpu, wq)->max_active = max_active; 3093 3094 spin_unlock_irq(&gcwq->lock); 3095 } 3096 3097 spin_unlock(&workqueue_lock); 3098 } 3099 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 3100 3101 /** 3102 * workqueue_congested - test whether a workqueue is congested 3103 * @cpu: CPU in question 3104 * @wq: target workqueue 3105 * 3106 * Test whether @wq's cpu workqueue for @cpu is congested. There is 3107 * no synchronization around this function and the test result is 3108 * unreliable and only useful as advisory hints or for debugging. 3109 * 3110 * RETURNS: 3111 * %true if congested, %false otherwise. 3112 */ 3113 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 3114 { 3115 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3116 3117 return !list_empty(&cwq->delayed_works); 3118 } 3119 EXPORT_SYMBOL_GPL(workqueue_congested); 3120 3121 /** 3122 * work_cpu - return the last known associated cpu for @work 3123 * @work: the work of interest 3124 * 3125 * RETURNS: 3126 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. 3127 */ 3128 unsigned int work_cpu(struct work_struct *work) 3129 { 3130 struct global_cwq *gcwq = get_work_gcwq(work); 3131 3132 return gcwq ? 
gcwq->cpu : WORK_CPU_NONE; 3133 } 3134 EXPORT_SYMBOL_GPL(work_cpu); 3135 3136 /** 3137 * work_busy - test whether a work is currently pending or running 3138 * @work: the work to be tested 3139 * 3140 * Test whether @work is currently pending or running. There is no 3141 * synchronization around this function and the test result is 3142 * unreliable and only useful as advisory hints or for debugging. 3143 * Especially for reentrant wqs, the pending state might hide the 3144 * running state. 3145 * 3146 * RETURNS: 3147 * OR'd bitmask of WORK_BUSY_* bits. 3148 */ 3149 unsigned int work_busy(struct work_struct *work) 3150 { 3151 struct global_cwq *gcwq = get_work_gcwq(work); 3152 unsigned long flags; 3153 unsigned int ret = 0; 3154 3155 if (!gcwq) 3156 return false; 3157 3158 spin_lock_irqsave(&gcwq->lock, flags); 3159 3160 if (work_pending(work)) 3161 ret |= WORK_BUSY_PENDING; 3162 if (find_worker_executing_work(gcwq, work)) 3163 ret |= WORK_BUSY_RUNNING; 3164 3165 spin_unlock_irqrestore(&gcwq->lock, flags); 3166 3167 return ret; 3168 } 3169 EXPORT_SYMBOL_GPL(work_busy); 3170 3171 /* 3172 * CPU hotplug. 3173 * 3174 * There are two challenges in supporting CPU hotplug. Firstly, there 3175 * are a lot of assumptions on strong associations among work, cwq and 3176 * gcwq which make migrating pending and scheduled works very 3177 * difficult to implement without impacting hot paths. Secondly, 3178 * gcwqs serve a mix of short, long and very long running works making 3179 * blocked draining impractical. 3180 * 3181 * This is solved by allowing a gcwq to be detached from the CPU, running 3182 * it with unbound (rogue) workers and allowing it to be reattached 3183 * later if the cpu comes back online. A separate thread is created 3184 * to govern a gcwq in such state and is called the trustee of the 3185 * gcwq. 3186 * 3187 * Trustee states and their descriptions. 3188 * 3189 * START Command state used on startup. On CPU_DOWN_PREPARE, a 3190 * new trustee is started with this state. 3191 * 3192 * IN_CHARGE Once started, trustee will enter this state after 3193 * assuming the manager role and making all existing 3194 * workers rogue. DOWN_PREPARE waits for trustee to 3195 * enter this state. After reaching IN_CHARGE, trustee 3196 * tries to execute the pending worklist until it's empty 3197 * and the state is set to BUTCHER, or the state is set 3198 * to RELEASE. 3199 * 3200 * BUTCHER Command state which is set by the cpu callback after 3201 * the cpu has gone down. Once this state is set, the trustee 3202 * knows that there will be no new works on the worklist 3203 * and once the worklist is empty it can proceed to 3204 * killing idle workers. 3205 * 3206 * RELEASE Command state which is set by the cpu callback if the 3207 * cpu down has been canceled or it has come online 3208 * again. After recognizing this state, trustee stops 3209 * trying to drain or butcher and clears ROGUE, rebinds 3210 * all remaining workers back to the cpu and releases 3211 * manager role. 3212 * 3213 * DONE Trustee will enter this state after BUTCHER or RELEASE 3214 * is complete. 
3215 * 3216 * trustee CPU draining 3217 * took over down complete 3218 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE 3219 * | | ^ 3220 * | CPU is back online v return workers | 3221 * ----------------> RELEASE -------------- 3222 */ 3223 3224 /** 3225 * trustee_wait_event_timeout - timed event wait for trustee 3226 * @cond: condition to wait for 3227 * @timeout: timeout in jiffies 3228 * 3229 * wait_event_timeout() for trustee to use. Handles locking and 3230 * checks for RELEASE request. 3231 * 3232 * CONTEXT: 3233 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3234 * multiple times. To be used by trustee. 3235 * 3236 * RETURNS: 3237 * Positive indicating left time if @cond is satisfied, 0 if timed 3238 * out, -1 if canceled. 3239 */ 3240 #define trustee_wait_event_timeout(cond, timeout) ({ \ 3241 long __ret = (timeout); \ 3242 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ 3243 __ret) { \ 3244 spin_unlock_irq(&gcwq->lock); \ 3245 __wait_event_timeout(gcwq->trustee_wait, (cond) || \ 3246 (gcwq->trustee_state == TRUSTEE_RELEASE), \ 3247 __ret); \ 3248 spin_lock_irq(&gcwq->lock); \ 3249 } \ 3250 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ 3251 }) 3252 3253 /** 3254 * trustee_wait_event - event wait for trustee 3255 * @cond: condition to wait for 3256 * 3257 * wait_event() for trustee to use. Automatically handles locking and 3258 * checks for CANCEL request. 3259 * 3260 * CONTEXT: 3261 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3262 * multiple times. To be used by trustee. 3263 * 3264 * RETURNS: 3265 * 0 if @cond is satisfied, -1 if canceled. 3266 */ 3267 #define trustee_wait_event(cond) ({ \ 3268 long __ret1; \ 3269 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ 3270 __ret1 < 0 ? -1 : 0; \ 3271 }) 3272 3273 static int __cpuinit trustee_thread(void *__gcwq) 3274 { 3275 struct global_cwq *gcwq = __gcwq; 3276 struct worker *worker; 3277 struct work_struct *work; 3278 struct hlist_node *pos; 3279 long rc; 3280 int i; 3281 3282 BUG_ON(gcwq->cpu != smp_processor_id()); 3283 3284 spin_lock_irq(&gcwq->lock); 3285 /* 3286 * Claim the manager position and make all workers rogue. 3287 * Trustee must be bound to the target cpu and can't be 3288 * cancelled. 3289 */ 3290 BUG_ON(gcwq->cpu != smp_processor_id()); 3291 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); 3292 BUG_ON(rc < 0); 3293 3294 gcwq->flags |= GCWQ_MANAGING_WORKERS; 3295 3296 list_for_each_entry(worker, &gcwq->idle_list, entry) 3297 worker->flags |= WORKER_ROGUE; 3298 3299 for_each_busy_worker(worker, i, pos, gcwq) 3300 worker->flags |= WORKER_ROGUE; 3301 3302 /* 3303 * Call schedule() so that we cross rq->lock and thus can 3304 * guarantee sched callbacks see the rogue flag. This is 3305 * necessary as scheduler callbacks may be invoked from other 3306 * cpus. 3307 */ 3308 spin_unlock_irq(&gcwq->lock); 3309 schedule(); 3310 spin_lock_irq(&gcwq->lock); 3311 3312 /* 3313 * Sched callbacks are disabled now. Zap nr_running. After 3314 * this, nr_running stays zero and need_more_worker() and 3315 * keep_working() are always true as long as the worklist is 3316 * not empty. 3317 */ 3318 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); 3319 3320 spin_unlock_irq(&gcwq->lock); 3321 del_timer_sync(&gcwq->idle_timer); 3322 spin_lock_irq(&gcwq->lock); 3323 3324 /* 3325 * We're now in charge. Notify and proceed to drain. 
We need 3326 * to keep the gcwq running during the whole CPU down 3327 * procedure as other cpu hotunplug callbacks may need to 3328 * flush currently running tasks. 3329 */ 3330 gcwq->trustee_state = TRUSTEE_IN_CHARGE; 3331 wake_up_all(&gcwq->trustee_wait); 3332 3333 /* 3334 * The original cpu is in the process of dying and may go away 3335 * anytime now. When that happens, we and all workers would 3336 * be migrated to other cpus. Try draining any left work. We 3337 * want to get it over with ASAP - spam rescuers, wake up as 3338 * many idlers as necessary and create new ones till the 3339 * worklist is empty. Note that if the gcwq is frozen, there 3340 * may be frozen works in freezable cwqs. Don't declare 3341 * completion while frozen. 3342 */ 3343 while (gcwq->nr_workers != gcwq->nr_idle || 3344 gcwq->flags & GCWQ_FREEZING || 3345 gcwq->trustee_state == TRUSTEE_IN_CHARGE) { 3346 int nr_works = 0; 3347 3348 list_for_each_entry(work, &gcwq->worklist, entry) { 3349 send_mayday(work); 3350 nr_works++; 3351 } 3352 3353 list_for_each_entry(worker, &gcwq->idle_list, entry) { 3354 if (!nr_works--) 3355 break; 3356 wake_up_process(worker->task); 3357 } 3358 3359 if (need_to_create_worker(gcwq)) { 3360 spin_unlock_irq(&gcwq->lock); 3361 worker = create_worker(gcwq, false); 3362 spin_lock_irq(&gcwq->lock); 3363 if (worker) { 3364 worker->flags |= WORKER_ROGUE; 3365 start_worker(worker); 3366 } 3367 } 3368 3369 /* give a breather */ 3370 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) 3371 break; 3372 } 3373 3374 /* 3375 * Either all works have been scheduled and cpu is down, or 3376 * cpu down has already been canceled. Wait for and butcher 3377 * all workers till we're canceled. 3378 */ 3379 do { 3380 rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); 3381 while (!list_empty(&gcwq->idle_list)) 3382 destroy_worker(list_first_entry(&gcwq->idle_list, 3383 struct worker, entry)); 3384 } while (gcwq->nr_workers && rc >= 0); 3385 3386 /* 3387 * At this point, either draining has completed and no worker 3388 * is left, or cpu down has been canceled or the cpu is being 3389 * brought back up. There shouldn't be any idle one left. 3390 * Tell the remaining busy ones to rebind once it finishes the 3391 * currently scheduled works by scheduling the rebind_work. 3392 */ 3393 WARN_ON(!list_empty(&gcwq->idle_list)); 3394 3395 for_each_busy_worker(worker, i, pos, gcwq) { 3396 struct work_struct *rebind_work = &worker->rebind_work; 3397 3398 /* 3399 * Rebind_work may race with future cpu hotplug 3400 * operations. Use a separate flag to mark that 3401 * rebinding is scheduled. 
3402 */ 3403 worker->flags |= WORKER_REBIND; 3404 worker->flags &= ~WORKER_ROGUE; 3405 3406 /* queue rebind_work, wq doesn't matter, use the default one */ 3407 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 3408 work_data_bits(rebind_work))) 3409 continue; 3410 3411 debug_work_activate(rebind_work); 3412 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, 3413 worker->scheduled.next, 3414 work_color_to_flags(WORK_NO_COLOR)); 3415 } 3416 3417 /* relinquish manager role */ 3418 gcwq->flags &= ~GCWQ_MANAGING_WORKERS; 3419 3420 /* notify completion */ 3421 gcwq->trustee = NULL; 3422 gcwq->trustee_state = TRUSTEE_DONE; 3423 wake_up_all(&gcwq->trustee_wait); 3424 spin_unlock_irq(&gcwq->lock); 3425 return 0; 3426 } 3427 3428 /** 3429 * wait_trustee_state - wait for trustee to enter the specified state 3430 * @gcwq: gcwq the trustee of interest belongs to 3431 * @state: target state to wait for 3432 * 3433 * Wait for the trustee to reach @state. DONE is already matched. 3434 * 3435 * CONTEXT: 3436 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 3437 * multiple times. To be used by cpu_callback. 3438 */ 3439 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) 3440 __releases(&gcwq->lock) 3441 __acquires(&gcwq->lock) 3442 { 3443 if (!(gcwq->trustee_state == state || 3444 gcwq->trustee_state == TRUSTEE_DONE)) { 3445 spin_unlock_irq(&gcwq->lock); 3446 __wait_event(gcwq->trustee_wait, 3447 gcwq->trustee_state == state || 3448 gcwq->trustee_state == TRUSTEE_DONE); 3449 spin_lock_irq(&gcwq->lock); 3450 } 3451 } 3452 3453 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 3454 unsigned long action, 3455 void *hcpu) 3456 { 3457 unsigned int cpu = (unsigned long)hcpu; 3458 struct global_cwq *gcwq = get_gcwq(cpu); 3459 struct task_struct *new_trustee = NULL; 3460 struct worker *uninitialized_var(new_worker); 3461 unsigned long flags; 3462 3463 action &= ~CPU_TASKS_FROZEN; 3464 3465 switch (action) { 3466 case CPU_DOWN_PREPARE: 3467 new_trustee = kthread_create(trustee_thread, gcwq, 3468 "workqueue_trustee/%d\n", cpu); 3469 if (IS_ERR(new_trustee)) 3470 return notifier_from_errno(PTR_ERR(new_trustee)); 3471 kthread_bind(new_trustee, cpu); 3472 /* fall through */ 3473 case CPU_UP_PREPARE: 3474 BUG_ON(gcwq->first_idle); 3475 new_worker = create_worker(gcwq, false); 3476 if (!new_worker) { 3477 if (new_trustee) 3478 kthread_stop(new_trustee); 3479 return NOTIFY_BAD; 3480 } 3481 } 3482 3483 /* some are called w/ irq disabled, don't disturb irq status */ 3484 spin_lock_irqsave(&gcwq->lock, flags); 3485 3486 switch (action) { 3487 case CPU_DOWN_PREPARE: 3488 /* initialize trustee and tell it to acquire the gcwq */ 3489 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); 3490 gcwq->trustee = new_trustee; 3491 gcwq->trustee_state = TRUSTEE_START; 3492 wake_up_process(gcwq->trustee); 3493 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); 3494 /* fall through */ 3495 case CPU_UP_PREPARE: 3496 BUG_ON(gcwq->first_idle); 3497 gcwq->first_idle = new_worker; 3498 break; 3499 3500 case CPU_DYING: 3501 /* 3502 * Before this, the trustee and all workers except for 3503 * the ones which are still executing works from 3504 * before the last CPU down must be on the cpu. After 3505 * this, they'll all be diasporas. 
3506 */ 3507 gcwq->flags |= GCWQ_DISASSOCIATED; 3508 break; 3509 3510 case CPU_POST_DEAD: 3511 gcwq->trustee_state = TRUSTEE_BUTCHER; 3512 /* fall through */ 3513 case CPU_UP_CANCELED: 3514 destroy_worker(gcwq->first_idle); 3515 gcwq->first_idle = NULL; 3516 break; 3517 3518 case CPU_DOWN_FAILED: 3519 case CPU_ONLINE: 3520 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3521 if (gcwq->trustee_state != TRUSTEE_DONE) { 3522 gcwq->trustee_state = TRUSTEE_RELEASE; 3523 wake_up_process(gcwq->trustee); 3524 wait_trustee_state(gcwq, TRUSTEE_DONE); 3525 } 3526 3527 /* 3528 * Trustee is done and there might be no worker left. 3529 * Put the first_idle in and request a real manager to 3530 * take a look. 3531 */ 3532 spin_unlock_irq(&gcwq->lock); 3533 kthread_bind(gcwq->first_idle->task, cpu); 3534 spin_lock_irq(&gcwq->lock); 3535 gcwq->flags |= GCWQ_MANAGE_WORKERS; 3536 start_worker(gcwq->first_idle); 3537 gcwq->first_idle = NULL; 3538 break; 3539 } 3540 3541 spin_unlock_irqrestore(&gcwq->lock, flags); 3542 3543 return notifier_from_errno(0); 3544 } 3545 3546 #ifdef CONFIG_SMP 3547 3548 struct work_for_cpu { 3549 struct completion completion; 3550 long (*fn)(void *); 3551 void *arg; 3552 long ret; 3553 }; 3554 3555 static int do_work_for_cpu(void *_wfc) 3556 { 3557 struct work_for_cpu *wfc = _wfc; 3558 wfc->ret = wfc->fn(wfc->arg); 3559 complete(&wfc->completion); 3560 return 0; 3561 } 3562 3563 /** 3564 * work_on_cpu - run a function in user context on a particular cpu 3565 * @cpu: the cpu to run on 3566 * @fn: the function to run 3567 * @arg: the function arg 3568 * 3569 * This will return the value @fn returns. 3570 * It is up to the caller to ensure that the cpu doesn't go offline. 3571 * The caller must not hold any locks which would prevent @fn from completing. 3572 */ 3573 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 3574 { 3575 struct task_struct *sub_thread; 3576 struct work_for_cpu wfc = { 3577 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), 3578 .fn = fn, 3579 .arg = arg, 3580 }; 3581 3582 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); 3583 if (IS_ERR(sub_thread)) 3584 return PTR_ERR(sub_thread); 3585 kthread_bind(sub_thread, cpu); 3586 wake_up_process(sub_thread); 3587 wait_for_completion(&wfc.completion); 3588 return wfc.ret; 3589 } 3590 EXPORT_SYMBOL_GPL(work_on_cpu); 3591 #endif /* CONFIG_SMP */ 3592 3593 #ifdef CONFIG_FREEZER 3594 3595 /** 3596 * freeze_workqueues_begin - begin freezing workqueues 3597 * 3598 * Start freezing workqueues. After this function returns, all freezable 3599 * workqueues will queue new works to their frozen_works list instead of 3600 * gcwq->worklist. 3601 * 3602 * CONTEXT: 3603 * Grabs and releases workqueue_lock and gcwq->lock's. 3604 */ 3605 void freeze_workqueues_begin(void) 3606 { 3607 unsigned int cpu; 3608 3609 spin_lock(&workqueue_lock); 3610 3611 BUG_ON(workqueue_freezing); 3612 workqueue_freezing = true; 3613 3614 for_each_gcwq_cpu(cpu) { 3615 struct global_cwq *gcwq = get_gcwq(cpu); 3616 struct workqueue_struct *wq; 3617 3618 spin_lock_irq(&gcwq->lock); 3619 3620 BUG_ON(gcwq->flags & GCWQ_FREEZING); 3621 gcwq->flags |= GCWQ_FREEZING; 3622 3623 list_for_each_entry(wq, &workqueues, list) { 3624 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3625 3626 if (cwq && wq->flags & WQ_FREEZABLE) 3627 cwq->max_active = 0; 3628 } 3629 3630 spin_unlock_irq(&gcwq->lock); 3631 } 3632 3633 spin_unlock(&workqueue_lock); 3634 } 3635 3636 /** 3637 * freeze_workqueues_busy - are freezable workqueues still busy? 
3638 * 3639 * Check whether freezing is complete. This function must be called 3640 * between freeze_workqueues_begin() and thaw_workqueues(). 3641 * 3642 * CONTEXT: 3643 * Grabs and releases workqueue_lock. 3644 * 3645 * RETURNS: 3646 * %true if some freezable workqueues are still busy. %false if freezing 3647 * is complete. 3648 */ 3649 bool freeze_workqueues_busy(void) 3650 { 3651 unsigned int cpu; 3652 bool busy = false; 3653 3654 spin_lock(&workqueue_lock); 3655 3656 BUG_ON(!workqueue_freezing); 3657 3658 for_each_gcwq_cpu(cpu) { 3659 struct workqueue_struct *wq; 3660 /* 3661 * nr_active is monotonically decreasing. It's safe 3662 * to peek without lock. 3663 */ 3664 list_for_each_entry(wq, &workqueues, list) { 3665 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3666 3667 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3668 continue; 3669 3670 BUG_ON(cwq->nr_active < 0); 3671 if (cwq->nr_active) { 3672 busy = true; 3673 goto out_unlock; 3674 } 3675 } 3676 } 3677 out_unlock: 3678 spin_unlock(&workqueue_lock); 3679 return busy; 3680 } 3681 3682 /** 3683 * thaw_workqueues - thaw workqueues 3684 * 3685 * Thaw workqueues. Normal queueing is restored and all collected 3686 * frozen works are transferred to their respective gcwq worklists. 3687 * 3688 * CONTEXT: 3689 * Grabs and releases workqueue_lock and gcwq->lock's. 3690 */ 3691 void thaw_workqueues(void) 3692 { 3693 unsigned int cpu; 3694 3695 spin_lock(&workqueue_lock); 3696 3697 if (!workqueue_freezing) 3698 goto out_unlock; 3699 3700 for_each_gcwq_cpu(cpu) { 3701 struct global_cwq *gcwq = get_gcwq(cpu); 3702 struct workqueue_struct *wq; 3703 3704 spin_lock_irq(&gcwq->lock); 3705 3706 BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); 3707 gcwq->flags &= ~GCWQ_FREEZING; 3708 3709 list_for_each_entry(wq, &workqueues, list) { 3710 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3711 3712 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3713 continue; 3714 3715 /* restore max_active and repopulate worklist */ 3716 cwq->max_active = wq->saved_max_active; 3717 3718 while (!list_empty(&cwq->delayed_works) && 3719 cwq->nr_active < cwq->max_active) 3720 cwq_activate_first_delayed(cwq); 3721 } 3722 3723 wake_up_worker(gcwq); 3724 3725 spin_unlock_irq(&gcwq->lock); 3726 } 3727 3728 workqueue_freezing = false; 3729 out_unlock: 3730 spin_unlock(&workqueue_lock); 3731 } 3732 #endif /* CONFIG_FREEZER */ 3733 3734 static int __init init_workqueues(void) 3735 { 3736 unsigned int cpu; 3737 int i; 3738 3739 cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); 3740 3741 /* initialize gcwqs */ 3742 for_each_gcwq_cpu(cpu) { 3743 struct global_cwq *gcwq = get_gcwq(cpu); 3744 3745 spin_lock_init(&gcwq->lock); 3746 INIT_LIST_HEAD(&gcwq->worklist); 3747 gcwq->cpu = cpu; 3748 gcwq->flags |= GCWQ_DISASSOCIATED; 3749 3750 INIT_LIST_HEAD(&gcwq->idle_list); 3751 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) 3752 INIT_HLIST_HEAD(&gcwq->busy_hash[i]); 3753 3754 init_timer_deferrable(&gcwq->idle_timer); 3755 gcwq->idle_timer.function = idle_worker_timeout; 3756 gcwq->idle_timer.data = (unsigned long)gcwq; 3757 3758 setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, 3759 (unsigned long)gcwq); 3760 3761 ida_init(&gcwq->worker_ida); 3762 3763 gcwq->trustee_state = TRUSTEE_DONE; 3764 init_waitqueue_head(&gcwq->trustee_wait); 3765 } 3766 3767 /* create the initial worker */ 3768 for_each_online_gcwq_cpu(cpu) { 3769 struct global_cwq *gcwq = get_gcwq(cpu); 3770 struct worker *worker; 3771 3772 if (cpu != WORK_CPU_UNBOUND) 3773 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3774 worker = 
create_worker(gcwq, true); 3775 BUG_ON(!worker); 3776 spin_lock_irq(&gcwq->lock); 3777 start_worker(worker); 3778 spin_unlock_irq(&gcwq->lock); 3779 } 3780 3781 system_wq = alloc_workqueue("events", 0, 0); 3782 system_long_wq = alloc_workqueue("events_long", 0, 0); 3783 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); 3784 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 3785 WQ_UNBOUND_MAX_ACTIVE); 3786 system_freezable_wq = alloc_workqueue("events_freezable", 3787 WQ_FREEZABLE, 0); 3788 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || 3789 !system_unbound_wq || !system_freezable_wq); 3790 return 0; 3791 } 3792 early_initcall(init_workqueues); 3793
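/*
 * Editorial addendum, not part of the original file: a minimal,
 * self-contained sketch of how the interface implemented above is
 * meant to be used by a client. The example symbols (example_*,
 * CONFIG_WORKQUEUE_EXAMPLE_USAGE) are hypothetical; the workqueue
 * calls themselves are the interfaces defined in this file and in
 * include/linux/workqueue.h.
 */
#ifdef CONFIG_WORKQUEUE_EXAMPLE_USAGE
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_wq_usage(void)
{
	struct workqueue_struct *wq;

	/* a dedicated queue; WQ_MEM_RECLAIM guarantees a rescuer */
	wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &example_work);	/* asynchronous execution */
	flush_workqueue(wq);		/* wait for it to finish */
	destroy_workqueue(wq);		/* drains before freeing */
	return 0;
}
late_initcall(example_wq_usage);
#endif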