1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter.
17  */
18 
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #include <linux/idr.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/workqueue.h>
40 
41 #include "workqueue_sched.h"
42 
43 enum {
44 	/* global_cwq flags */
45 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
46 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
47 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
48 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
49 	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
50 
51 	/* worker flags */
52 	WORKER_STARTED		= 1 << 0,	/* started */
53 	WORKER_DIE		= 1 << 1,	/* die die die */
54 	WORKER_IDLE		= 1 << 2,	/* is idle */
55 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
56 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
57 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
58 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
59 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
60 
61 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
62 				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
63 
64 	/* gcwq->trustee_state */
65 	TRUSTEE_START		= 0,		/* start */
66 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
67 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
68 	TRUSTEE_RELEASE		= 3,		/* release workers */
69 	TRUSTEE_DONE		= 4,		/* trustee is done */
70 
71 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
72 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
73 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
74 
75 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
76 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
77 
78 	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
79 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
80 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
81 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
82 
83 	/*
84 	 * Rescue workers are used only in emergencies and are shared by
85 	 * all cpus.  Give them nice -20.
86 	 */
87 	RESCUER_NICE_LEVEL	= -20,
88 };
89 
90 /*
91  * Structure fields follow one of the following exclusion rules.
92  *
93  * I: Set during initialization and read-only afterwards.
94  *
95  * P: Preemption protected.  Disabling preemption is enough and should
96  *    only be modified and accessed from the local cpu.
97  *
98  * L: gcwq->lock protected.  Access with gcwq->lock held.
99  *
100  * X: During normal operation, modification requires gcwq->lock and
101  *    should be done only from local cpu.  Either disabling preemption
102  *    on local cpu or grabbing gcwq->lock is enough for read access.
103  *    If GCWQ_DISASSOCIATED is set, it's identical to L.
104  *
105  * F: wq->flush_mutex protected.
106  *
107  * W: workqueue_lock protected.
108  */
109 
110 struct global_cwq;
111 
112 /*
113  * The poor guys doing the actual heavy lifting.  All on-duty workers
114  * are either serving the manager role, on idle list or on busy hash.
115  */
116 struct worker {
117 	/* on idle list while idle, on busy hash table while busy */
118 	union {
119 		struct list_head	entry;	/* L: while idle */
120 		struct hlist_node	hentry;	/* L: while busy */
121 	};
122 
123 	struct work_struct	*current_work;	/* L: work being processed */
124 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
125 	struct list_head	scheduled;	/* L: scheduled works */
126 	struct task_struct	*task;		/* I: worker task */
127 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
128 	/* 64 byte boundary on 64bit, 32 byte on 32bit */
129 	unsigned long		last_active;	/* L: last active timestamp */
130 	unsigned int		flags;		/* X: flags */
131 	int			id;		/* I: worker id */
132 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
133 };
134 
135 /*
136  * Global per-cpu workqueue.  There's one and only one for each cpu
137  * and all works are queued and processed here regardless of their
138  * target workqueues.
139  */
140 struct global_cwq {
141 	spinlock_t		lock;		/* the gcwq lock */
142 	struct list_head	worklist;	/* L: list of pending works */
143 	unsigned int		cpu;		/* I: the associated cpu */
144 	unsigned int		flags;		/* L: GCWQ_* flags */
145 
146 	int			nr_workers;	/* L: total number of workers */
147 	int			nr_idle;	/* L: currently idle ones */
148 
149 	/* workers are chained either in the idle_list or busy_hash */
150 	struct list_head	idle_list;	/* X: list of idle workers */
151 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
152 						/* L: hash of busy workers */
153 
154 	struct timer_list	idle_timer;	/* L: worker idle timeout */
155 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
156 
157 	struct ida		worker_ida;	/* L: for worker IDs */
158 
159 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
160 	unsigned int		trustee_state;	/* L: trustee state */
161 	wait_queue_head_t	trustee_wait;	/* trustee wait */
162 	struct worker		*first_idle;	/* L: first idle worker */
163 } ____cacheline_aligned_in_smp;
164 
165 /*
166  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
167  * work_struct->data are used for flags and thus cwqs need to be
168  * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
169  */
170 struct cpu_workqueue_struct {
171 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
172 	struct workqueue_struct *wq;		/* I: the owning workqueue */
173 	int			work_color;	/* L: current color */
174 	int			flush_color;	/* L: flushing color */
175 	int			nr_in_flight[WORK_NR_COLORS];
176 						/* L: nr of in_flight works */
177 	int			nr_active;	/* L: nr of active works */
178 	int			max_active;	/* L: max active works */
179 	struct list_head	delayed_works;	/* L: delayed works */
180 };
181 
182 /*
183  * Structure used to wait for workqueue flush.
184  */
185 struct wq_flusher {
186 	struct list_head	list;		/* F: list of flushers */
187 	int			flush_color;	/* F: flush color waiting for */
188 	struct completion	done;		/* flush completion */
189 };
190 
191 /*
192  * All cpumasks are assumed to be always set on UP and thus can't be
193  * used to determine whether there's something to be done.
194  */
195 #ifdef CONFIG_SMP
196 typedef cpumask_var_t mayday_mask_t;
197 #define mayday_test_and_set_cpu(cpu, mask)	\
198 	cpumask_test_and_set_cpu((cpu), (mask))
199 #define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
200 #define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
201 #define alloc_mayday_mask(maskp, gfp)		alloc_cpumask_var((maskp), (gfp))
202 #define free_mayday_mask(mask)			free_cpumask_var((mask))
203 #else
204 typedef unsigned long mayday_mask_t;
205 #define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
206 #define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
207 #define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
208 #define alloc_mayday_mask(maskp, gfp)		true
209 #define free_mayday_mask(mask)			do { } while (0)
210 #endif
211 
212 /*
213  * The externally visible workqueue abstraction is an array of
214  * per-CPU workqueues:
215  */
216 struct workqueue_struct {
217 	unsigned int		flags;		/* I: WQ_* flags */
218 	union {
219 		struct cpu_workqueue_struct __percpu	*pcpu;
220 		struct cpu_workqueue_struct		*single;
221 		unsigned long				v;
222 	} cpu_wq;				/* I: cwq's */
223 	struct list_head	list;		/* W: list of all workqueues */
224 
225 	struct mutex		flush_mutex;	/* protects wq flushing */
226 	int			work_color;	/* F: current work color */
227 	int			flush_color;	/* F: current flush color */
228 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
229 	struct wq_flusher	*first_flusher;	/* F: first flusher */
230 	struct list_head	flusher_queue;	/* F: flush waiters */
231 	struct list_head	flusher_overflow; /* F: flush overflow list */
232 
233 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
234 	struct worker		*rescuer;	/* I: rescue worker */
235 
236 	int			saved_max_active; /* W: saved cwq max_active */
237 	const char		*name;		/* I: workqueue name */
238 #ifdef CONFIG_LOCKDEP
239 	struct lockdep_map	lockdep_map;
240 #endif
241 };
242 
243 struct workqueue_struct *system_wq __read_mostly;
244 struct workqueue_struct *system_long_wq __read_mostly;
245 struct workqueue_struct *system_nrt_wq __read_mostly;
246 struct workqueue_struct *system_unbound_wq __read_mostly;
247 EXPORT_SYMBOL_GPL(system_wq);
248 EXPORT_SYMBOL_GPL(system_long_wq);
249 EXPORT_SYMBOL_GPL(system_nrt_wq);
250 EXPORT_SYMBOL_GPL(system_unbound_wq);
251 
252 #define for_each_busy_worker(worker, i, pos, gcwq)			\
253 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
254 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
255 
256 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
257 				  unsigned int sw)
258 {
259 	if (cpu < nr_cpu_ids) {
260 		if (sw & 1) {
261 			cpu = cpumask_next(cpu, mask);
262 			if (cpu < nr_cpu_ids)
263 				return cpu;
264 		}
265 		if (sw & 2)
266 			return WORK_CPU_UNBOUND;
267 	}
268 	return WORK_CPU_NONE;
269 }
270 
271 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
272 				struct workqueue_struct *wq)
273 {
274 	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
275 }
276 
277 /*
278  * CPU iterators
279  *
280  * An extra gcwq is defined for an invalid cpu number
281  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
282  * specific CPU.  The following iterators are similar to
283  * for_each_*_cpu() iterators but also considers the unbound gcwq.
284  * for_each_*_cpu() iterators but also consider the unbound gcwq.
285  * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
286  * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
287  * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
288  *				  WORK_CPU_UNBOUND for unbound workqueues
289  */
290 #define for_each_gcwq_cpu(cpu)						\
291 	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
292 	     (cpu) < WORK_CPU_NONE;					\
293 	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
294 
295 #define for_each_online_gcwq_cpu(cpu)					\
296 	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
297 	     (cpu) < WORK_CPU_NONE;					\
298 	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
299 
300 #define for_each_cwq_cpu(cpu, wq)					\
301 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
302 	     (cpu) < WORK_CPU_NONE;					\
303 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
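
/*
 * Minimal usage sketch: walking every gcwq slot, including the unbound
 * one, with the iterator above.  The helper name is hypothetical and
 * only illustrates the calling convention.
 */
static void __maybe_unused example_walk_gcwq_cpus(void)
{
	unsigned int cpu;

	/* visits all possible CPUs first, then WORK_CPU_UNBOUND */
	for_each_gcwq_cpu(cpu)
		pr_info("considering gcwq slot %u\n", cpu);
}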
304 
305 #ifdef CONFIG_LOCKDEP
306 /**
307  * in_workqueue_context() - in context of specified workqueue?
308  * @wq: the workqueue of interest
309  *
310  * Checks lockdep state to see if the current task is executing from
311  * within a workqueue item.  This function exists only if lockdep is
312  * enabled.
313  */
314 int in_workqueue_context(struct workqueue_struct *wq)
315 {
316 	return lock_is_held(&wq->lockdep_map);
317 }
318 #endif
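
/*
 * Usage sketch (hypothetical caller, CONFIG_LOCKDEP only): a work
 * function can assert that it is really running off the workqueue it
 * expects.  system_wq here is just an example target.
 */
#ifdef CONFIG_LOCKDEP
static void __maybe_unused example_assert_wq_context(struct work_struct *work)
{
	/* fires if this function runs outside a system_wq work item */
	WARN_ON(!in_workqueue_context(system_wq));
}
#endif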
319 
320 #ifdef CONFIG_DEBUG_OBJECTS_WORK
321 
322 static struct debug_obj_descr work_debug_descr;
323 
324 /*
325  * fixup_init is called when:
326  * - an active object is initialized
327  */
328 static int work_fixup_init(void *addr, enum debug_obj_state state)
329 {
330 	struct work_struct *work = addr;
331 
332 	switch (state) {
333 	case ODEBUG_STATE_ACTIVE:
334 		cancel_work_sync(work);
335 		debug_object_init(work, &work_debug_descr);
336 		return 1;
337 	default:
338 		return 0;
339 	}
340 }
341 
342 /*
343  * fixup_activate is called when:
344  * - an active object is activated
345  * - an unknown object is activated (might be a statically initialized object)
346  */
347 static int work_fixup_activate(void *addr, enum debug_obj_state state)
348 {
349 	struct work_struct *work = addr;
350 
351 	switch (state) {
352 
353 	case ODEBUG_STATE_NOTAVAILABLE:
354 		/*
355 		 * This is not really a fixup. The work struct was
356 		 * statically initialized. We just make sure that it
357 		 * is tracked in the object tracker.
358 		 */
359 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
360 			debug_object_init(work, &work_debug_descr);
361 			debug_object_activate(work, &work_debug_descr);
362 			return 0;
363 		}
364 		WARN_ON_ONCE(1);
365 		return 0;
366 
367 	case ODEBUG_STATE_ACTIVE:
368 		WARN_ON(1);
369 
370 	default:
371 		return 0;
372 	}
373 }
374 
375 /*
376  * fixup_free is called when:
377  * - an active object is freed
378  */
379 static int work_fixup_free(void *addr, enum debug_obj_state state)
380 {
381 	struct work_struct *work = addr;
382 
383 	switch (state) {
384 	case ODEBUG_STATE_ACTIVE:
385 		cancel_work_sync(work);
386 		debug_object_free(work, &work_debug_descr);
387 		return 1;
388 	default:
389 		return 0;
390 	}
391 }
392 
393 static struct debug_obj_descr work_debug_descr = {
394 	.name		= "work_struct",
395 	.fixup_init	= work_fixup_init,
396 	.fixup_activate	= work_fixup_activate,
397 	.fixup_free	= work_fixup_free,
398 };
399 
400 static inline void debug_work_activate(struct work_struct *work)
401 {
402 	debug_object_activate(work, &work_debug_descr);
403 }
404 
405 static inline void debug_work_deactivate(struct work_struct *work)
406 {
407 	debug_object_deactivate(work, &work_debug_descr);
408 }
409 
410 void __init_work(struct work_struct *work, int onstack)
411 {
412 	if (onstack)
413 		debug_object_init_on_stack(work, &work_debug_descr);
414 	else
415 		debug_object_init(work, &work_debug_descr);
416 }
417 EXPORT_SYMBOL_GPL(__init_work);
418 
419 void destroy_work_on_stack(struct work_struct *work)
420 {
421 	debug_object_free(work, &work_debug_descr);
422 }
423 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
424 
425 #else
426 static inline void debug_work_activate(struct work_struct *work) { }
427 static inline void debug_work_deactivate(struct work_struct *work) { }
428 #endif
429 
430 /* Serializes the accesses to the list of workqueues. */
431 static DEFINE_SPINLOCK(workqueue_lock);
432 static LIST_HEAD(workqueues);
433 static bool workqueue_freezing;		/* W: have wqs started freezing? */
434 
435 /*
436  * The almighty global cpu workqueues.  nr_running is the only field
437  * which is expected to be used frequently by other cpus via
438  * try_to_wake_up().  Put it in a separate cacheline.
439  */
440 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
441 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
442 
443 /*
444  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
445  * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
446  * workers have WORKER_UNBOUND set.
447  */
448 static struct global_cwq unbound_global_cwq;
449 static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
450 
451 static int worker_thread(void *__worker);
452 
453 static struct global_cwq *get_gcwq(unsigned int cpu)
454 {
455 	if (cpu != WORK_CPU_UNBOUND)
456 		return &per_cpu(global_cwq, cpu);
457 	else
458 		return &unbound_global_cwq;
459 }
460 
461 static atomic_t *get_gcwq_nr_running(unsigned int cpu)
462 {
463 	if (cpu != WORK_CPU_UNBOUND)
464 		return &per_cpu(gcwq_nr_running, cpu);
465 	else
466 		return &unbound_gcwq_nr_running;
467 }
468 
469 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
470 					    struct workqueue_struct *wq)
471 {
472 	if (!(wq->flags & WQ_UNBOUND)) {
473 		if (likely(cpu < nr_cpu_ids)) {
474 #ifdef CONFIG_SMP
475 			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
476 #else
477 			return wq->cpu_wq.single;
478 #endif
479 		}
480 	} else if (likely(cpu == WORK_CPU_UNBOUND))
481 		return wq->cpu_wq.single;
482 	return NULL;
483 }
484 
485 static unsigned int work_color_to_flags(int color)
486 {
487 	return color << WORK_STRUCT_COLOR_SHIFT;
488 }
489 
490 static int get_work_color(struct work_struct *work)
491 {
492 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
493 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
494 }
495 
496 static int work_next_color(int color)
497 {
498 	return (color + 1) % WORK_NR_COLORS;
499 }
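
/*
 * Quick illustration, assuming the caller owns the work (PENDING set):
 * a color packed by work_color_to_flags() survives the round trip
 * through work->data.  The helper is hypothetical and exists only to
 * make the bit layout concrete.
 */
static void __maybe_unused example_color_roundtrip(struct work_struct *work)
{
	int color = work_next_color(0);		/* any valid color */

	atomic_long_set(&work->data,
			work_color_to_flags(color) | WORK_STRUCT_PENDING);
	WARN_ON(get_work_color(work) != color);
}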
500 
501 /*
502  * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
503  * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
504  * cleared and the work data contains the cpu number it was last on.
505  *
506  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
507  * cwq, cpu or clear work->data.  These functions should only be
508  * called while the work is owned, i.e. while the PENDING bit is set.
509  *
510  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
511  * corresponding to a work.  gcwq is available once the work has been
512  * queued anywhere after initialization.  cwq is available only from
513  * queueing until execution starts.
514  */
515 static inline void set_work_data(struct work_struct *work, unsigned long data,
516 				 unsigned long flags)
517 {
518 	BUG_ON(!work_pending(work));
519 	atomic_long_set(&work->data, data | flags | work_static(work));
520 }
521 
522 static void set_work_cwq(struct work_struct *work,
523 			 struct cpu_workqueue_struct *cwq,
524 			 unsigned long extra_flags)
525 {
526 	set_work_data(work, (unsigned long)cwq,
527 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
528 }
529 
530 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
531 {
532 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
533 }
534 
535 static void clear_work_data(struct work_struct *work)
536 {
537 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
538 }
539 
540 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
541 {
542 	unsigned long data = atomic_long_read(&work->data);
543 
544 	if (data & WORK_STRUCT_CWQ)
545 		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
546 	else
547 		return NULL;
548 }
549 
550 static struct global_cwq *get_work_gcwq(struct work_struct *work)
551 {
552 	unsigned long data = atomic_long_read(&work->data);
553 	unsigned int cpu;
554 
555 	if (data & WORK_STRUCT_CWQ)
556 		return ((struct cpu_workqueue_struct *)
557 			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
558 
559 	cpu = data >> WORK_STRUCT_FLAG_BITS;
560 	if (cpu == WORK_CPU_NONE)
561 		return NULL;
562 
563 	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
564 	return get_gcwq(cpu);
565 }
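
/*
 * Sketch of the encoding described above, assuming the caller owns the
 * work (PENDING set), as set_work_data() demands.  After set_work_cpu()
 * the gcwq can be recovered from work->data; clear_work_data() makes it
 * unresolvable again.  The helper is hypothetical.
 */
static void __maybe_unused example_work_data_roundtrip(struct work_struct *work)
{
	unsigned int cpu = raw_smp_processor_id();

	set_work_cpu(work, cpu);
	WARN_ON(get_work_gcwq(work) != get_gcwq(cpu));

	clear_work_data(work);
	WARN_ON(get_work_gcwq(work) != NULL);
}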
566 
567 /*
568  * Policy functions.  These define the policies on how the global
569  * worker pool is managed.  Unless noted otherwise, these functions
570  * assume that they're being called with gcwq->lock held.
571  */
572 
573 static bool __need_more_worker(struct global_cwq *gcwq)
574 {
575 	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
576 		gcwq->flags & GCWQ_HIGHPRI_PENDING;
577 }
578 
579 /*
580  * Need to wake up a worker?  Called from anything but currently
581  * running workers.
582  */
583 static bool need_more_worker(struct global_cwq *gcwq)
584 {
585 	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
586 }
587 
588 /* Can I start working?  Called from busy but !running workers. */
589 static bool may_start_working(struct global_cwq *gcwq)
590 {
591 	return gcwq->nr_idle;
592 }
593 
594 /* Do I need to keep working?  Called from currently running workers. */
595 static bool keep_working(struct global_cwq *gcwq)
596 {
597 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
598 
599 	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
600 }
601 
602 /* Do we need a new worker?  Called from manager. */
603 static bool need_to_create_worker(struct global_cwq *gcwq)
604 {
605 	return need_more_worker(gcwq) && !may_start_working(gcwq);
606 }
607 
608 /* Do I need to be the manager? */
609 static bool need_to_manage_workers(struct global_cwq *gcwq)
610 {
611 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
612 }
613 
614 /* Do we have too many workers and should some go away? */
615 static bool too_many_workers(struct global_cwq *gcwq)
616 {
617 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
618 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
619 	int nr_busy = gcwq->nr_workers - nr_idle;
620 
621 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
622 }
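
/*
 * Worked example of the check above: with MAX_IDLE_WORKERS_RATIO = 4 and
 * 10 busy workers, 4 idle workers pass ((4 - 2) * 4 = 8 < 10) while a
 * 5th trips the test ((5 - 2) * 4 = 12 >= 10), so the manager may start
 * destroying idle workers.
 */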
623 
624 /*
625  * Wake up functions.
626  */
627 
628 /* Return the first worker.  Safe with preemption disabled */
629 static struct worker *first_worker(struct global_cwq *gcwq)
630 {
631 	if (unlikely(list_empty(&gcwq->idle_list)))
632 		return NULL;
633 
634 	return list_first_entry(&gcwq->idle_list, struct worker, entry);
635 }
636 
637 /**
638  * wake_up_worker - wake up an idle worker
639  * @gcwq: gcwq to wake worker for
640  *
641  * Wake up the first idle worker of @gcwq.
642  *
643  * CONTEXT:
644  * spin_lock_irq(gcwq->lock).
645  */
646 static void wake_up_worker(struct global_cwq *gcwq)
647 {
648 	struct worker *worker = first_worker(gcwq);
649 
650 	if (likely(worker))
651 		wake_up_process(worker->task);
652 }
653 
654 /**
655  * wq_worker_waking_up - a worker is waking up
656  * @task: task waking up
657  * @cpu: CPU @task is waking up to
658  *
659  * This function is called during try_to_wake_up() when a worker is
660  * being awoken.
661  *
662  * CONTEXT:
663  * spin_lock_irq(rq->lock)
664  */
665 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
666 {
667 	struct worker *worker = kthread_data(task);
668 
669 	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
670 		atomic_inc(get_gcwq_nr_running(cpu));
671 }
672 
673 /**
674  * wq_worker_sleeping - a worker is going to sleep
675  * @task: task going to sleep
676  * @cpu: CPU in question, must be the current CPU number
677  *
678  * This function is called during schedule() when a busy worker is
679  * going to sleep.  A worker on the same cpu can be woken up by
680  * returning a pointer to its task.
681  *
682  * CONTEXT:
683  * spin_lock_irq(rq->lock)
684  *
685  * RETURNS:
686  * Worker task on @cpu to wake up, %NULL if none.
687  */
688 struct task_struct *wq_worker_sleeping(struct task_struct *task,
689 				       unsigned int cpu)
690 {
691 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
692 	struct global_cwq *gcwq = get_gcwq(cpu);
693 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
694 
695 	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
696 		return NULL;
697 
698 	/* this can only happen on the local cpu */
699 	BUG_ON(cpu != raw_smp_processor_id());
700 
701 	/*
702 	 * The counterpart of the following dec_and_test, implied mb,
703 	 * worklist not empty test sequence is in insert_work().
704 	 * Please read comment there.
705 	 *
706 	 * NOT_RUNNING is clear.  This means that trustee is not in
707 	 * charge and we're running on the local cpu w/ rq lock held
708 	 * and preemption disabled, which in turn means that no one else
709 	 * could be manipulating idle_list, so dereferencing idle_list
710 	 * without gcwq lock is safe.
711 	 */
712 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
713 		to_wakeup = first_worker(gcwq);
714 	return to_wakeup ? to_wakeup->task : NULL;
715 }
716 
717 /**
718  * worker_set_flags - set worker flags and adjust nr_running accordingly
719  * @worker: self
720  * @flags: flags to set
721  * @wakeup: wakeup an idle worker if necessary
722  *
723  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
724  * nr_running becomes zero and @wakeup is %true, an idle worker is
725  * woken up.
726  *
727  * CONTEXT:
728  * spin_lock_irq(gcwq->lock)
729  */
730 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
731 				    bool wakeup)
732 {
733 	struct global_cwq *gcwq = worker->gcwq;
734 
735 	WARN_ON_ONCE(worker->task != current);
736 
737 	/*
738 	 * If transitioning into NOT_RUNNING, adjust nr_running and
739 	 * wake up an idle worker as necessary if requested by
740 	 * @wakeup.
741 	 */
742 	if ((flags & WORKER_NOT_RUNNING) &&
743 	    !(worker->flags & WORKER_NOT_RUNNING)) {
744 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
745 
746 		if (wakeup) {
747 			if (atomic_dec_and_test(nr_running) &&
748 			    !list_empty(&gcwq->worklist))
749 				wake_up_worker(gcwq);
750 		} else
751 			atomic_dec(nr_running);
752 	}
753 
754 	worker->flags |= flags;
755 }
756 
757 /**
758  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
759  * @worker: self
760  * @flags: flags to clear
761  *
762  * Clear @flags in @worker->flags and adjust nr_running accordingly.
763  *
764  * CONTEXT:
765  * spin_lock_irq(gcwq->lock)
766  */
767 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
768 {
769 	struct global_cwq *gcwq = worker->gcwq;
770 	unsigned int oflags = worker->flags;
771 
772 	WARN_ON_ONCE(worker->task != current);
773 
774 	worker->flags &= ~flags;
775 
776 	/* if transitioning out of NOT_RUNNING, increment nr_running */
777 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
778 		if (!(worker->flags & WORKER_NOT_RUNNING))
779 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
780 }
781 
782 /**
783  * busy_worker_head - return the busy hash head for a work
784  * @gcwq: gcwq of interest
785  * @work: work to be hashed
786  *
787  * Return hash head of @gcwq for @work.
788  *
789  * CONTEXT:
790  * spin_lock_irq(gcwq->lock).
791  *
792  * RETURNS:
793  * Pointer to the hash head.
794  */
795 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
796 					   struct work_struct *work)
797 {
798 	const int base_shift = ilog2(sizeof(struct work_struct));
799 	unsigned long v = (unsigned long)work;
800 
801 	/* simple shift and fold hash, do we need something better? */
802 	v >>= base_shift;
803 	v += v >> BUSY_WORKER_HASH_ORDER;
804 	v &= BUSY_WORKER_HASH_MASK;
805 
806 	return &gcwq->busy_hash[v];
807 }
808 
809 /**
810  * __find_worker_executing_work - find worker which is executing a work
811  * @gcwq: gcwq of interest
812  * @bwh: hash head as returned by busy_worker_head()
813  * @work: work to find worker for
814  *
815  * Find a worker which is executing @work on @gcwq.  @bwh should be
816  * the hash head obtained by calling busy_worker_head() with the same
817  * work.
818  *
819  * CONTEXT:
820  * spin_lock_irq(gcwq->lock).
821  *
822  * RETURNS:
823  * Pointer to worker which is executing @work if found, NULL
824  * otherwise.
825  */
826 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
827 						   struct hlist_head *bwh,
828 						   struct work_struct *work)
829 {
830 	struct worker *worker;
831 	struct hlist_node *tmp;
832 
833 	hlist_for_each_entry(worker, tmp, bwh, hentry)
834 		if (worker->current_work == work)
835 			return worker;
836 	return NULL;
837 }
838 
839 /**
840  * find_worker_executing_work - find worker which is executing a work
841  * @gcwq: gcwq of interest
842  * @work: work to find worker for
843  *
844  * Find a worker which is executing @work on @gcwq.  This function is
845  * identical to __find_worker_executing_work() except that this
846  * function calculates @bwh itself.
847  *
848  * CONTEXT:
849  * spin_lock_irq(gcwq->lock).
850  *
851  * RETURNS:
852  * Pointer to worker which is executing @work if found, NULL
853  * otherwise.
854  */
855 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
856 						 struct work_struct *work)
857 {
858 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
859 					    work);
860 }
861 
862 /**
863  * gcwq_determine_ins_pos - find insertion position
864  * @gcwq: gcwq of interest
865  * @cwq: cwq a work is being queued for
866  *
867  * A work for @cwq is about to be queued on @gcwq, determine insertion
868  * position for the work.  If @cwq is for HIGHPRI wq, the work is
869  * queued at the head of the queue but in FIFO order with respect to
870  * other HIGHPRI works; otherwise, at the end of the queue.  This
871  * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
872  * there are HIGHPRI works pending.
873  *
874  * CONTEXT:
875  * spin_lock_irq(gcwq->lock).
876  *
877  * RETURNS:
878  * Pointer to the insertion position.
879  */
880 static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
881 					       struct cpu_workqueue_struct *cwq)
882 {
883 	struct work_struct *twork;
884 
885 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
886 		return &gcwq->worklist;
887 
888 	list_for_each_entry(twork, &gcwq->worklist, entry) {
889 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
890 
891 		if (!(tcwq->wq->flags & WQ_HIGHPRI))
892 			break;
893 	}
894 
895 	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
896 	return &twork->entry;
897 }
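
/*
 * Usage sketch (hypothetical caller): works queued through a WQ_HIGHPRI
 * workqueue are inserted at the head position computed above, ahead of
 * all normal works already on the shared gcwq worklist.
 */
static struct workqueue_struct * __maybe_unused example_highpri_wq(void)
{
	/* the name "example_hi" and max_active of 0 (default) are illustrative */
	return alloc_workqueue("example_hi", WQ_HIGHPRI, 0);
}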
898 
899 /**
900  * insert_work - insert a work into gcwq
901  * @cwq: cwq @work belongs to
902  * @work: work to insert
903  * @head: insertion point
904  * @extra_flags: extra WORK_STRUCT_* flags to set
905  *
906  * Insert @work which belongs to @cwq into @gcwq after @head.
907  * @extra_flags is or'd to work_struct flags.
908  *
909  * CONTEXT:
910  * spin_lock_irq(gcwq->lock).
911  */
912 static void insert_work(struct cpu_workqueue_struct *cwq,
913 			struct work_struct *work, struct list_head *head,
914 			unsigned int extra_flags)
915 {
916 	struct global_cwq *gcwq = cwq->gcwq;
917 
918 	/* we own @work, set data and link */
919 	set_work_cwq(work, cwq, extra_flags);
920 
921 	/*
922 	 * Ensure that we get the right work->data if we see the
923 	 * result of list_add() below, see try_to_grab_pending().
924 	 */
925 	smp_wmb();
926 
927 	list_add_tail(&work->entry, head);
928 
929 	/*
930 	 * Ensure either wq_worker_sleeping() sees the above
931 	 * list_add_tail() or we see zero nr_running to avoid workers
932 	 * lying around lazily while there are works to be processed.
933 	 */
934 	smp_mb();
935 
936 	if (__need_more_worker(gcwq))
937 		wake_up_worker(gcwq);
938 }
939 
940 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
941 			 struct work_struct *work)
942 {
943 	struct global_cwq *gcwq;
944 	struct cpu_workqueue_struct *cwq;
945 	struct list_head *worklist;
946 	unsigned long flags;
947 
948 	debug_work_activate(work);
949 
950 	/* determine gcwq to use */
951 	if (!(wq->flags & WQ_UNBOUND)) {
952 		struct global_cwq *last_gcwq;
953 
954 		if (unlikely(cpu == WORK_CPU_UNBOUND))
955 			cpu = raw_smp_processor_id();
956 
957 		/*
958 		 * It's a multi-cpu workqueue.  If @wq is non-reentrant and @work
959 		 * was previously on a different cpu, it might still
960 		 * be running there, in which case the work needs to
961 		 * be queued on that cpu to guarantee non-reentrance.
962 		 */
963 		gcwq = get_gcwq(cpu);
964 		if (wq->flags & WQ_NON_REENTRANT &&
965 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
966 			struct worker *worker;
967 
968 			spin_lock_irqsave(&last_gcwq->lock, flags);
969 
970 			worker = find_worker_executing_work(last_gcwq, work);
971 
972 			if (worker && worker->current_cwq->wq == wq)
973 				gcwq = last_gcwq;
974 			else {
975 				/* meh... not running there, queue here */
976 				spin_unlock_irqrestore(&last_gcwq->lock, flags);
977 				spin_lock_irqsave(&gcwq->lock, flags);
978 			}
979 		} else
980 			spin_lock_irqsave(&gcwq->lock, flags);
981 	} else {
982 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
983 		spin_lock_irqsave(&gcwq->lock, flags);
984 	}
985 
986 	/* gcwq determined, get cwq and queue */
987 	cwq = get_cwq(gcwq->cpu, wq);
988 
989 	BUG_ON(!list_empty(&work->entry));
990 
991 	cwq->nr_in_flight[cwq->work_color]++;
992 
993 	if (likely(cwq->nr_active < cwq->max_active)) {
994 		cwq->nr_active++;
995 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
996 	} else
997 		worklist = &cwq->delayed_works;
998 
999 	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
1000 
1001 	spin_unlock_irqrestore(&gcwq->lock, flags);
1002 }
1003 
1004 /**
1005  * queue_work - queue work on a workqueue
1006  * @wq: workqueue to use
1007  * @work: work to queue
1008  *
1009  * Returns 0 if @work was already on a queue, non-zero otherwise.
1010  *
1011  * We queue the work to the CPU on which it was submitted, but if the CPU dies
1012  * it can be processed by another CPU.
1013  */
1014 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1015 {
1016 	int ret;
1017 
1018 	ret = queue_work_on(get_cpu(), wq, work);
1019 	put_cpu();
1020 
1021 	return ret;
1022 }
1023 EXPORT_SYMBOL_GPL(queue_work);
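
/*
 * Usage sketch (hypothetical caller, not part of this file): declare a
 * work item, point it at a handler and queue it on one of the system
 * workqueues declared above.
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work running in process context\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static void __maybe_unused example_queue(void)
{
	/* queue_work() returns 0 if example_work was already pending */
	if (!queue_work(system_wq, &example_work))
		pr_info("example work was already queued\n");
}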
1024 
1025 /**
1026  * queue_work_on - queue work on specific cpu
1027  * @cpu: CPU number to execute work on
1028  * @wq: workqueue to use
1029  * @work: work to queue
1030  *
1031  * Returns 0 if @work was already on a queue, non-zero otherwise.
1032  *
1033  * We queue the work to a specific CPU; the caller must ensure that
1034  * the CPU can't go away.
1035  */
1036 int
1037 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1038 {
1039 	int ret = 0;
1040 
1041 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1042 		__queue_work(cpu, wq, work);
1043 		ret = 1;
1044 	}
1045 	return ret;
1046 }
1047 EXPORT_SYMBOL_GPL(queue_work_on);
1048 
1049 static void delayed_work_timer_fn(unsigned long __data)
1050 {
1051 	struct delayed_work *dwork = (struct delayed_work *)__data;
1052 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1053 
1054 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1055 }
1056 
1057 /**
1058  * queue_delayed_work - queue work on a workqueue after delay
1059  * @wq: workqueue to use
1060  * @dwork: delayable work to queue
1061  * @delay: number of jiffies to wait before queueing
1062  *
1063  * Returns 0 if @work was already on a queue, non-zero otherwise.
1064  */
1065 int queue_delayed_work(struct workqueue_struct *wq,
1066 			struct delayed_work *dwork, unsigned long delay)
1067 {
1068 	if (delay == 0)
1069 		return queue_work(wq, &dwork->work);
1070 
1071 	return queue_delayed_work_on(-1, wq, dwork, delay);
1072 }
1073 EXPORT_SYMBOL_GPL(queue_delayed_work);
1074 
1075 /**
1076  * queue_delayed_work_on - queue work on specific CPU after delay
1077  * @cpu: CPU number to execute work on
1078  * @wq: workqueue to use
1079  * @dwork: work to queue
1080  * @delay: number of jiffies to wait before queueing
1081  *
1082  * Returns 0 if @work was already on a queue, non-zero otherwise.
1083  */
1084 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1085 			struct delayed_work *dwork, unsigned long delay)
1086 {
1087 	int ret = 0;
1088 	struct timer_list *timer = &dwork->timer;
1089 	struct work_struct *work = &dwork->work;
1090 
1091 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1092 		unsigned int lcpu;
1093 
1094 		BUG_ON(timer_pending(timer));
1095 		BUG_ON(!list_empty(&work->entry));
1096 
1097 		timer_stats_timer_set_start_info(&dwork->timer);
1098 
1099 		/*
1100 		 * This stores cwq for the moment, for the timer_fn.
1101 		 * Note that the work's gcwq is preserved to allow
1102 		 * reentrance detection for delayed works.
1103 		 */
1104 		if (!(wq->flags & WQ_UNBOUND)) {
1105 			struct global_cwq *gcwq = get_work_gcwq(work);
1106 
1107 			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1108 				lcpu = gcwq->cpu;
1109 			else
1110 				lcpu = raw_smp_processor_id();
1111 		} else
1112 			lcpu = WORK_CPU_UNBOUND;
1113 
1114 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
1115 
1116 		timer->expires = jiffies + delay;
1117 		timer->data = (unsigned long)dwork;
1118 		timer->function = delayed_work_timer_fn;
1119 
1120 		if (unlikely(cpu >= 0))
1121 			add_timer_on(timer, cpu);
1122 		else
1123 			add_timer(timer);
1124 		ret = 1;
1125 	}
1126 	return ret;
1127 }
1128 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
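
/*
 * Usage sketch (hypothetical caller): queue a handler to run roughly
 * 100ms from now.  With cpu == -1 (via queue_delayed_work()) the timer
 * is armed locally and the work is queued on whichever CPU the timer
 * fires on.
 */
static void example_delayed_fn(struct work_struct *work)
{
	pr_info("example delayed work fired\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_delayed_fn);

static void __maybe_unused example_queue_delayed(void)
{
	queue_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(100));
}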
1129 
1130 /**
1131  * worker_enter_idle - enter idle state
1132  * @worker: worker which is entering idle state
1133  *
1134  * @worker is entering idle state.  Update stats and idle timer if
1135  * necessary.
1136  *
1137  * LOCKING:
1138  * spin_lock_irq(gcwq->lock).
1139  */
1140 static void worker_enter_idle(struct worker *worker)
1141 {
1142 	struct global_cwq *gcwq = worker->gcwq;
1143 
1144 	BUG_ON(worker->flags & WORKER_IDLE);
1145 	BUG_ON(!list_empty(&worker->entry) &&
1146 	       (worker->hentry.next || worker->hentry.pprev));
1147 
1148 	/* can't use worker_set_flags(); this is also called by start_worker() */
1149 	worker->flags |= WORKER_IDLE;
1150 	gcwq->nr_idle++;
1151 	worker->last_active = jiffies;
1152 
1153 	/* idle_list is LIFO */
1154 	list_add(&worker->entry, &gcwq->idle_list);
1155 
1156 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1157 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1158 			mod_timer(&gcwq->idle_timer,
1159 				  jiffies + IDLE_WORKER_TIMEOUT);
1160 	} else
1161 		wake_up_all(&gcwq->trustee_wait);
1162 
1163 	/* sanity check nr_running */
1164 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1165 		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1166 }
1167 
1168 /**
1169  * worker_leave_idle - leave idle state
1170  * @worker: worker which is leaving idle state
1171  *
1172  * @worker is leaving idle state.  Update stats.
1173  *
1174  * LOCKING:
1175  * spin_lock_irq(gcwq->lock).
1176  */
1177 static void worker_leave_idle(struct worker *worker)
1178 {
1179 	struct global_cwq *gcwq = worker->gcwq;
1180 
1181 	BUG_ON(!(worker->flags & WORKER_IDLE));
1182 	worker_clr_flags(worker, WORKER_IDLE);
1183 	gcwq->nr_idle--;
1184 	list_del_init(&worker->entry);
1185 }
1186 
1187 /**
1188  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1189  * @worker: self
1190  *
1191  * Works which are scheduled while the cpu is online must at least be
1192  * scheduled to a worker which is bound to the cpu so that if they are
1193  * flushed from cpu callbacks while cpu is going down, they are
1194  * guaranteed to execute on the cpu.
1195  *
1196  * This function is to be used by rogue workers and rescuers to bind
1197  * themselves to the target cpu and may race with cpu going down or
1198  * coming online.  kthread_bind() can't be used because it may put the
1199  * worker on an already dead cpu, and set_cpus_allowed_ptr() can't be
1200  * used verbatim as it is best-effort and blocking while the gcwq may
1201  * be [dis]associated in the meantime.
1202  *
1203  * This function tries set_cpus_allowed_ptr() and locks gcwq and verifies
1204  * the binding against GCWQ_DISASSOCIATED which is set during
1205  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1206  * idle state or fetches works without dropping lock, it can guarantee
1207  * the scheduling requirement described in the first paragraph.
1208  *
1209  * CONTEXT:
1210  * Might sleep.  Called without any lock but returns with gcwq->lock
1211  * held.
1212  *
1213  * RETURNS:
1214  * %true if the associated gcwq is online (@worker is successfully
1215  * bound), %false if offline.
1216  */
1217 static bool worker_maybe_bind_and_lock(struct worker *worker)
1218 {
1219 	struct global_cwq *gcwq = worker->gcwq;
1220 	struct task_struct *task = worker->task;
1221 
1222 	while (true) {
1223 		/*
1224 		 * The following call may fail, succeed or succeed
1225 		 * without actually migrating the task to the cpu if
1226 		 * it races with cpu hotunplug operation.  Verify
1227 		 * against GCWQ_DISASSOCIATED.
1228 		 */
1229 		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1230 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1231 
1232 		spin_lock_irq(&gcwq->lock);
1233 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1234 			return false;
1235 		if (task_cpu(task) == gcwq->cpu &&
1236 		    cpumask_equal(&current->cpus_allowed,
1237 				  get_cpu_mask(gcwq->cpu)))
1238 			return true;
1239 		spin_unlock_irq(&gcwq->lock);
1240 
1241 		/* CPU has come up in between, retry migration */
1242 		cpu_relax();
1243 	}
1244 }
1245 
1246 /*
1247  * Function for worker->rebind_work used to rebind rogue busy workers
1248  * to the associated cpu which is coming back online.  This is
1249  * scheduled by cpu up but can race with other cpu hotplug operations
1250  * and may be executed twice without intervening cpu down.
1251  */
1252 static void worker_rebind_fn(struct work_struct *work)
1253 {
1254 	struct worker *worker = container_of(work, struct worker, rebind_work);
1255 	struct global_cwq *gcwq = worker->gcwq;
1256 
1257 	if (worker_maybe_bind_and_lock(worker))
1258 		worker_clr_flags(worker, WORKER_REBIND);
1259 
1260 	spin_unlock_irq(&gcwq->lock);
1261 }
1262 
1263 static struct worker *alloc_worker(void)
1264 {
1265 	struct worker *worker;
1266 
1267 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1268 	if (worker) {
1269 		INIT_LIST_HEAD(&worker->entry);
1270 		INIT_LIST_HEAD(&worker->scheduled);
1271 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1272 		/* on creation a worker is in !idle && prep state */
1273 		worker->flags = WORKER_PREP;
1274 	}
1275 	return worker;
1276 }
1277 
1278 /**
1279  * create_worker - create a new workqueue worker
1280  * @gcwq: gcwq the new worker will belong to
1281  * @bind: whether to bind the worker to @gcwq's cpu or not
1282  *
1283  * Create a new worker which is bound to @gcwq.  The returned worker
1284  * can be started by calling start_worker() or destroyed using
1285  * destroy_worker().
1286  *
1287  * CONTEXT:
1288  * Might sleep.  Does GFP_KERNEL allocations.
1289  *
1290  * RETURNS:
1291  * Pointer to the newly created worker.
1292  */
1293 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1294 {
1295 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1296 	struct worker *worker = NULL;
1297 	int id = -1;
1298 
1299 	spin_lock_irq(&gcwq->lock);
1300 	while (ida_get_new(&gcwq->worker_ida, &id)) {
1301 		spin_unlock_irq(&gcwq->lock);
1302 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1303 			goto fail;
1304 		spin_lock_irq(&gcwq->lock);
1305 	}
1306 	spin_unlock_irq(&gcwq->lock);
1307 
1308 	worker = alloc_worker();
1309 	if (!worker)
1310 		goto fail;
1311 
1312 	worker->gcwq = gcwq;
1313 	worker->id = id;
1314 
1315 	if (!on_unbound_cpu)
1316 		worker->task = kthread_create(worker_thread, worker,
1317 					      "kworker/%u:%d", gcwq->cpu, id);
1318 	else
1319 		worker->task = kthread_create(worker_thread, worker,
1320 					      "kworker/u:%d", id);
1321 	if (IS_ERR(worker->task))
1322 		goto fail;
1323 
1324 	/*
1325 	 * A rogue worker will become a regular one if CPU comes
1326 	 * online later on.  Make sure every worker has
1327 	 * PF_THREAD_BOUND set.
1328 	 */
1329 	if (bind && !on_unbound_cpu)
1330 		kthread_bind(worker->task, gcwq->cpu);
1331 	else {
1332 		worker->task->flags |= PF_THREAD_BOUND;
1333 		if (on_unbound_cpu)
1334 			worker->flags |= WORKER_UNBOUND;
1335 	}
1336 
1337 	return worker;
1338 fail:
1339 	if (id >= 0) {
1340 		spin_lock_irq(&gcwq->lock);
1341 		ida_remove(&gcwq->worker_ida, id);
1342 		spin_unlock_irq(&gcwq->lock);
1343 	}
1344 	kfree(worker);
1345 	return NULL;
1346 }
1347 
1348 /**
1349  * start_worker - start a newly created worker
1350  * @worker: worker to start
1351  *
1352  * Make the gcwq aware of @worker and start it.
1353  *
1354  * CONTEXT:
1355  * spin_lock_irq(gcwq->lock).
1356  */
1357 static void start_worker(struct worker *worker)
1358 {
1359 	worker->flags |= WORKER_STARTED;
1360 	worker->gcwq->nr_workers++;
1361 	worker_enter_idle(worker);
1362 	wake_up_process(worker->task);
1363 }
1364 
1365 /**
1366  * destroy_worker - destroy a workqueue worker
1367  * @worker: worker to be destroyed
1368  *
1369  * Destroy @worker and adjust @gcwq stats accordingly.
1370  *
1371  * CONTEXT:
1372  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1373  */
1374 static void destroy_worker(struct worker *worker)
1375 {
1376 	struct global_cwq *gcwq = worker->gcwq;
1377 	int id = worker->id;
1378 
1379 	/* sanity check frenzy */
1380 	BUG_ON(worker->current_work);
1381 	BUG_ON(!list_empty(&worker->scheduled));
1382 
1383 	if (worker->flags & WORKER_STARTED)
1384 		gcwq->nr_workers--;
1385 	if (worker->flags & WORKER_IDLE)
1386 		gcwq->nr_idle--;
1387 
1388 	list_del_init(&worker->entry);
1389 	worker->flags |= WORKER_DIE;
1390 
1391 	spin_unlock_irq(&gcwq->lock);
1392 
1393 	kthread_stop(worker->task);
1394 	kfree(worker);
1395 
1396 	spin_lock_irq(&gcwq->lock);
1397 	ida_remove(&gcwq->worker_ida, id);
1398 }
1399 
1400 static void idle_worker_timeout(unsigned long __gcwq)
1401 {
1402 	struct global_cwq *gcwq = (void *)__gcwq;
1403 
1404 	spin_lock_irq(&gcwq->lock);
1405 
1406 	if (too_many_workers(gcwq)) {
1407 		struct worker *worker;
1408 		unsigned long expires;
1409 
1410 		/* idle_list is kept in LIFO order, check the last one */
1411 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1412 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1413 
1414 		if (time_before(jiffies, expires))
1415 			mod_timer(&gcwq->idle_timer, expires);
1416 		else {
1417 			/* it's been idle for too long, wake up manager */
1418 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1419 			wake_up_worker(gcwq);
1420 		}
1421 	}
1422 
1423 	spin_unlock_irq(&gcwq->lock);
1424 }
1425 
1426 static bool send_mayday(struct work_struct *work)
1427 {
1428 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1429 	struct workqueue_struct *wq = cwq->wq;
1430 	unsigned int cpu;
1431 
1432 	if (!(wq->flags & WQ_RESCUER))
1433 		return false;
1434 
1435 	/* mayday mayday mayday */
1436 	cpu = cwq->gcwq->cpu;
1437 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1438 	if (cpu == WORK_CPU_UNBOUND)
1439 		cpu = 0;
1440 	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1441 		wake_up_process(wq->rescuer->task);
1442 	return true;
1443 }
1444 
1445 static void gcwq_mayday_timeout(unsigned long __gcwq)
1446 {
1447 	struct global_cwq *gcwq = (void *)__gcwq;
1448 	struct work_struct *work;
1449 
1450 	spin_lock_irq(&gcwq->lock);
1451 
1452 	if (need_to_create_worker(gcwq)) {
1453 		/*
1454 		 * We've been trying to create a new worker but
1455 		 * haven't been successful.  We might be hitting an
1456 		 * allocation deadlock.  Send distress signals to
1457 		 * rescuers.
1458 		 */
1459 		list_for_each_entry(work, &gcwq->worklist, entry)
1460 			send_mayday(work);
1461 	}
1462 
1463 	spin_unlock_irq(&gcwq->lock);
1464 
1465 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1466 }
1467 
1468 /**
1469  * maybe_create_worker - create a new worker if necessary
1470  * @gcwq: gcwq to create a new worker for
1471  *
1472  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1473  * have at least one idle worker on return from this function.  If
1474  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1475  * sent to all rescuers with works scheduled on @gcwq to resolve
1476  * possible allocation deadlock.
1477  *
1478  * On return, need_to_create_worker() is guaranteed to be false and
1479  * may_start_working() true.
1480  *
1481  * LOCKING:
1482  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1483  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1484  * manager.
1485  *
1486  * RETURNS:
1487  * false if no action was taken and gcwq->lock stayed locked, true
1488  * otherwise.
1489  */
1490 static bool maybe_create_worker(struct global_cwq *gcwq)
1491 {
1492 	if (!need_to_create_worker(gcwq))
1493 		return false;
1494 restart:
1495 	spin_unlock_irq(&gcwq->lock);
1496 
1497 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1498 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1499 
1500 	while (true) {
1501 		struct worker *worker;
1502 
1503 		worker = create_worker(gcwq, true);
1504 		if (worker) {
1505 			del_timer_sync(&gcwq->mayday_timer);
1506 			spin_lock_irq(&gcwq->lock);
1507 			start_worker(worker);
1508 			BUG_ON(need_to_create_worker(gcwq));
1509 			return true;
1510 		}
1511 
1512 		if (!need_to_create_worker(gcwq))
1513 			break;
1514 
1515 		__set_current_state(TASK_INTERRUPTIBLE);
1516 		schedule_timeout(CREATE_COOLDOWN);
1517 
1518 		if (!need_to_create_worker(gcwq))
1519 			break;
1520 	}
1521 
1522 	del_timer_sync(&gcwq->mayday_timer);
1523 	spin_lock_irq(&gcwq->lock);
1524 	if (need_to_create_worker(gcwq))
1525 		goto restart;
1526 	return true;
1527 }
1528 
1529 /**
1530  * maybe_destroy_workers - destroy workers which have been idle for a while
1531  * @gcwq: gcwq to destroy workers for
1532  *
1533  * Destroy @gcwq workers which have been idle for longer than
1534  * IDLE_WORKER_TIMEOUT.
1535  *
1536  * LOCKING:
1537  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1538  * multiple times.  Called only from manager.
1539  *
1540  * RETURNS:
1541  * false if no action was taken and gcwq->lock stayed locked, true
1542  * otherwise.
1543  */
1544 static bool maybe_destroy_workers(struct global_cwq *gcwq)
1545 {
1546 	bool ret = false;
1547 
1548 	while (too_many_workers(gcwq)) {
1549 		struct worker *worker;
1550 		unsigned long expires;
1551 
1552 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1553 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1554 
1555 		if (time_before(jiffies, expires)) {
1556 			mod_timer(&gcwq->idle_timer, expires);
1557 			break;
1558 		}
1559 
1560 		destroy_worker(worker);
1561 		ret = true;
1562 	}
1563 
1564 	return ret;
1565 }
1566 
1567 /**
1568  * manage_workers - manage worker pool
1569  * @worker: self
1570  *
1571  * Assume the manager role and manage gcwq worker pool @worker belongs
1572  * to.  At any given time, there can be only zero or one manager per
1573  * gcwq.  The exclusion is handled automatically by this function.
1574  *
1575  * The caller can safely start processing works on false return.  On
1576  * true return, it's guaranteed that need_to_create_worker() is false
1577  * and may_start_working() is true.
1578  *
1579  * CONTEXT:
1580  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1581  * multiple times.  Does GFP_KERNEL allocations.
1582  *
1583  * RETURNS:
1584  * false if no action was taken and gcwq->lock stayed locked, true if
1585  * some action was taken.
1586  */
1587 static bool manage_workers(struct worker *worker)
1588 {
1589 	struct global_cwq *gcwq = worker->gcwq;
1590 	bool ret = false;
1591 
1592 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1593 		return ret;
1594 
1595 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1596 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1597 
1598 	/*
1599 	 * Destroy and then create so that may_start_working() is true
1600 	 * on return.
1601 	 */
1602 	ret |= maybe_destroy_workers(gcwq);
1603 	ret |= maybe_create_worker(gcwq);
1604 
1605 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1606 
1607 	/*
1608 	 * The trustee might be waiting to take over the manager
1609 	 * position, tell it we're done.
1610 	 */
1611 	if (unlikely(gcwq->trustee))
1612 		wake_up_all(&gcwq->trustee_wait);
1613 
1614 	return ret;
1615 }
1616 
1617 /**
1618  * move_linked_works - move linked works to a list
1619  * @work: start of series of works to be scheduled
1620  * @head: target list to append @work to
1621  * @nextp: out parameter for nested worklist walking
1622  *
1623  * Move linked works starting from @work to @head.  The series of works
1624  * to be moved starts at @work and includes any consecutive work with
1625  * WORK_STRUCT_LINKED set in its predecessor.
1626  *
1627  * If @nextp is not NULL, it's updated to point to the next work of
1628  * the last scheduled work.  This allows move_linked_works() to be
1629  * nested inside outer list_for_each_entry_safe().
1630  *
1631  * CONTEXT:
1632  * spin_lock_irq(gcwq->lock).
1633  */
1634 static void move_linked_works(struct work_struct *work, struct list_head *head,
1635 			      struct work_struct **nextp)
1636 {
1637 	struct work_struct *n;
1638 
1639 	/*
1640 	 * Linked worklist will always end before the end of the list,
1641 	 * use NULL for list head.
1642 	 */
1643 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1644 		list_move_tail(&work->entry, head);
1645 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1646 			break;
1647 	}
1648 
1649 	/*
1650 	 * If we're already inside safe list traversal and have moved
1651 	 * multiple works to the scheduled queue, the next position
1652 	 * needs to be updated.
1653 	 */
1654 	if (nextp)
1655 		*nextp = n;
1656 }
1657 
1658 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1659 {
1660 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
1661 						    struct work_struct, entry);
1662 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1663 
1664 	move_linked_works(work, pos, NULL);
1665 	cwq->nr_active++;
1666 }
1667 
1668 /**
1669  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1670  * @cwq: cwq of interest
1671  * @color: color of work which left the queue
1672  *
1673  * A work either has completed or is removed from pending queue,
1674  * decrement nr_in_flight of its cwq and handle workqueue flushing.
1675  *
1676  * CONTEXT:
1677  * spin_lock_irq(gcwq->lock).
1678  */
1679 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1680 {
1681 	/* ignore uncolored works */
1682 	if (color == WORK_NO_COLOR)
1683 		return;
1684 
1685 	cwq->nr_in_flight[color]--;
1686 	cwq->nr_active--;
1687 
1688 	if (!list_empty(&cwq->delayed_works)) {
1689 		/* one down, submit a delayed one */
1690 		if (cwq->nr_active < cwq->max_active)
1691 			cwq_activate_first_delayed(cwq);
1692 	}
1693 
1694 	/* is flush in progress and are we at the flushing tip? */
1695 	if (likely(cwq->flush_color != color))
1696 		return;
1697 
1698 	/* are there still in-flight works? */
1699 	if (cwq->nr_in_flight[color])
1700 		return;
1701 
1702 	/* this cwq is done, clear flush_color */
1703 	cwq->flush_color = -1;
1704 
1705 	/*
1706 	 * If this was the last cwq, wake up the first flusher.  It
1707 	 * will handle the rest.
1708 	 */
1709 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1710 		complete(&cwq->wq->first_flusher->done);
1711 }
1712 
1713 /**
1714  * process_one_work - process single work
1715  * @worker: self
1716  * @work: work to process
1717  *
1718  * Process @work.  This function contains all the logic necessary to
1719  * process a single work including synchronization against and
1720  * interaction with other workers on the same cpu, queueing and
1721  * flushing.  As long as context requirement is met, any worker can
1722  * call this function to process a work.
1723  *
1724  * CONTEXT:
1725  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1726  */
1727 static void process_one_work(struct worker *worker, struct work_struct *work)
1728 {
1729 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1730 	struct global_cwq *gcwq = cwq->gcwq;
1731 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1732 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1733 	work_func_t f = work->func;
1734 	int work_color;
1735 	struct worker *collision;
1736 #ifdef CONFIG_LOCKDEP
1737 	/*
1738 	 * It is permissible to free the struct work_struct from
1739 	 * inside the function that is called from it, this we need to
1740 	 * take into account for lockdep too.  To avoid bogus "held
1741 	 * lock freed" warnings as well as problems when looking into
1742 	 * work->lockdep_map, make a copy and use that here.
1743 	 */
1744 	struct lockdep_map lockdep_map = work->lockdep_map;
1745 #endif
1746 	/*
1747 	 * A single work shouldn't be executed concurrently by
1748 	 * multiple workers on a single cpu.  Check whether anyone is
1749 	 * already processing the work.  If so, defer the work to the
1750 	 * currently executing one.
1751 	 */
1752 	collision = __find_worker_executing_work(gcwq, bwh, work);
1753 	if (unlikely(collision)) {
1754 		move_linked_works(work, &collision->scheduled, NULL);
1755 		return;
1756 	}
1757 
1758 	/* claim and process */
1759 	debug_work_deactivate(work);
1760 	hlist_add_head(&worker->hentry, bwh);
1761 	worker->current_work = work;
1762 	worker->current_cwq = cwq;
1763 	work_color = get_work_color(work);
1764 
1765 	/* record the current cpu number in the work data and dequeue */
1766 	set_work_cpu(work, gcwq->cpu);
1767 	list_del_init(&work->entry);
1768 
1769 	/*
1770 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1771 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1772 	 */
1773 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1774 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1775 						struct work_struct, entry);
1776 
1777 		if (!list_empty(&gcwq->worklist) &&
1778 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1779 			wake_up_worker(gcwq);
1780 		else
1781 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1782 	}
1783 
1784 	/*
1785 	 * CPU intensive works don't participate in concurrency
1786 	 * management.  They're the scheduler's responsibility.
1787 	 */
1788 	if (unlikely(cpu_intensive))
1789 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1790 
1791 	spin_unlock_irq(&gcwq->lock);
1792 
1793 	work_clear_pending(work);
1794 	lock_map_acquire(&cwq->wq->lockdep_map);
1795 	lock_map_acquire(&lockdep_map);
1796 	trace_workqueue_execute_start(work);
1797 	f(work);
1798 	/*
1799 	 * While we must be careful to not use "work" after this, the trace
1800 	 * point will only record its address.
1801 	 */
1802 	trace_workqueue_execute_end(work);
1803 	lock_map_release(&lockdep_map);
1804 	lock_map_release(&cwq->wq->lockdep_map);
1805 
1806 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1807 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1808 		       "%s/0x%08x/%d\n",
1809 		       current->comm, preempt_count(), task_pid_nr(current));
1810 		printk(KERN_ERR "    last function: ");
1811 		print_symbol("%s\n", (unsigned long)f);
1812 		debug_show_held_locks(current);
1813 		dump_stack();
1814 	}
1815 
1816 	spin_lock_irq(&gcwq->lock);
1817 
1818 	/* clear cpu intensive status */
1819 	if (unlikely(cpu_intensive))
1820 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1821 
1822 	/* we're done with it, release */
1823 	hlist_del_init(&worker->hentry);
1824 	worker->current_work = NULL;
1825 	worker->current_cwq = NULL;
1826 	cwq_dec_nr_in_flight(cwq, work_color);
1827 }
1828 
1829 /**
1830  * process_scheduled_works - process scheduled works
1831  * @worker: self
1832  *
1833  * Process all scheduled works.  Please note that the scheduled list
1834  * may change while processing a work, so this function repeatedly
1835  * fetches a work from the top and executes it.
1836  *
1837  * CONTEXT:
1838  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1839  * multiple times.
1840  */
1841 static void process_scheduled_works(struct worker *worker)
1842 {
1843 	while (!list_empty(&worker->scheduled)) {
1844 		struct work_struct *work = list_first_entry(&worker->scheduled,
1845 						struct work_struct, entry);
1846 		process_one_work(worker, work);
1847 	}
1848 }
1849 
1850 /**
1851  * worker_thread - the worker thread function
1852  * @__worker: self
1853  *
1854  * The gcwq worker thread function.  There's a single dynamic pool of
1855  * these per cpu.  These workers process all works regardless of
1856  * their specific target workqueue.  The only exception is works which
1857  * belong to workqueues with a rescuer, which is explained in
1858  * rescuer_thread().
1859  */
1860 static int worker_thread(void *__worker)
1861 {
1862 	struct worker *worker = __worker;
1863 	struct global_cwq *gcwq = worker->gcwq;
1864 
1865 	/* tell the scheduler that this is a workqueue worker */
1866 	worker->task->flags |= PF_WQ_WORKER;
1867 woke_up:
1868 	spin_lock_irq(&gcwq->lock);
1869 
1870 	/* DIE can be set only while we're idle, checking here is enough */
1871 	if (worker->flags & WORKER_DIE) {
1872 		spin_unlock_irq(&gcwq->lock);
1873 		worker->task->flags &= ~PF_WQ_WORKER;
1874 		return 0;
1875 	}
1876 
1877 	worker_leave_idle(worker);
1878 recheck:
1879 	/* no more worker necessary? */
1880 	if (!need_more_worker(gcwq))
1881 		goto sleep;
1882 
1883 	/* do we need to manage? */
1884 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1885 		goto recheck;
1886 
1887 	/*
1888 	 * ->scheduled list can only be filled while a worker is
1889 	 * preparing to process a work or actually processing it.
1890 	 * Make sure nobody diddled with it while I was sleeping.
1891 	 */
1892 	BUG_ON(!list_empty(&worker->scheduled));
1893 
1894 	/*
1895 	 * When control reaches this point, we're guaranteed to have
1896 	 * at least one idle worker or that someone else has already
1897 	 * assumed the manager role.
1898 	 */
1899 	worker_clr_flags(worker, WORKER_PREP);
1900 
1901 	do {
1902 		struct work_struct *work =
1903 			list_first_entry(&gcwq->worklist,
1904 					 struct work_struct, entry);
1905 
1906 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1907 			/* optimization path, not strictly necessary */
1908 			process_one_work(worker, work);
1909 			if (unlikely(!list_empty(&worker->scheduled)))
1910 				process_scheduled_works(worker);
1911 		} else {
1912 			move_linked_works(work, &worker->scheduled, NULL);
1913 			process_scheduled_works(worker);
1914 		}
1915 	} while (keep_working(gcwq));
1916 
1917 	worker_set_flags(worker, WORKER_PREP, false);
1918 sleep:
1919 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1920 		goto recheck;
1921 
1922 	/*
1923 	 * gcwq->lock is held and there's no work to process and no
1924 	 * need to manage, sleep.  Workers are woken up only while
1925 	 * holding gcwq->lock or from local cpu, so setting the
1926 	 * current state before releasing gcwq->lock is enough to
1927 	 * prevent losing any event.
1928 	 */
1929 	worker_enter_idle(worker);
1930 	__set_current_state(TASK_INTERRUPTIBLE);
1931 	spin_unlock_irq(&gcwq->lock);
1932 	schedule();
1933 	goto woke_up;
1934 }
1935 
1936 /**
1937  * rescuer_thread - the rescuer thread function
1938  * @__wq: the associated workqueue
1939  *
1940  * Workqueue rescuer thread function.  There's one rescuer for each
1941  * workqueue which has WQ_RESCUER set.
1942  *
1943  * Regular work processing on a gcwq may block trying to create a new
1944  * worker; the GFP_KERNEL allocation this involves has a slight chance
1945  * of developing into a deadlock if some works currently on the same
1946  * queue need to be processed to satisfy that very allocation.  This is
1947  * the problem the rescuer solves.
1948  *
1949  * When such a condition is possible, the gcwq summons the rescuers of
1950  * all workqueues which have works queued on the gcwq and lets them
1951  * process those works so that forward progress can be guaranteed.
1952  *
1953  * This should happen rarely.
1954  */
1955 static int rescuer_thread(void *__wq)
1956 {
1957 	struct workqueue_struct *wq = __wq;
1958 	struct worker *rescuer = wq->rescuer;
1959 	struct list_head *scheduled = &rescuer->scheduled;
1960 	bool is_unbound = wq->flags & WQ_UNBOUND;
1961 	unsigned int cpu;
1962 
1963 	set_user_nice(current, RESCUER_NICE_LEVEL);
1964 repeat:
1965 	set_current_state(TASK_INTERRUPTIBLE);
1966 
1967 	if (kthread_should_stop())
1968 		return 0;
1969 
1970 	/*
1971 	 * See whether any cpu is asking for help.  Unbound
1972 	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
1973 	 */
1974 	for_each_mayday_cpu(cpu, wq->mayday_mask) {
1975 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1976 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1977 		struct global_cwq *gcwq = cwq->gcwq;
1978 		struct work_struct *work, *n;
1979 
1980 		__set_current_state(TASK_RUNNING);
1981 		mayday_clear_cpu(cpu, wq->mayday_mask);
1982 
1983 		/* migrate to the target cpu if possible */
1984 		rescuer->gcwq = gcwq;
1985 		worker_maybe_bind_and_lock(rescuer);
1986 
1987 		/*
1988 		 * Slurp in all works issued via this workqueue and
1989 		 * process'em.
1990 		 */
1991 		BUG_ON(!list_empty(&rescuer->scheduled));
1992 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1993 			if (get_work_cwq(work) == cwq)
1994 				move_linked_works(work, scheduled, &n);
1995 
1996 		process_scheduled_works(rescuer);
1997 		spin_unlock_irq(&gcwq->lock);
1998 	}
1999 
2000 	schedule();
2001 	goto repeat;
2002 }
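
/*
 * Editorial example -- not part of workqueue.c.  A minimal sketch of
 * how a subsystem on the memory-reclaim path might request a rescuer,
 * using only alloc_workqueue() and queue_work() from
 * <linux/workqueue.h>.  All "example_reclaim" names are hypothetical.
 */
static struct workqueue_struct *example_reclaim_wq;

static void example_reclaim_fn(struct work_struct *work)
{
	/* may end up running from the rescuer when worker creation is stuck */
}
static DECLARE_WORK(example_reclaim_work, example_reclaim_fn);

static int __init example_reclaim_init(void)
{
	/* WQ_RESCUER makes workqueue creation spawn rescuer_thread() above */
	example_reclaim_wq = alloc_workqueue("example_reclaim", WQ_RESCUER, 1);
	if (!example_reclaim_wq)
		return -ENOMEM;
	queue_work(example_reclaim_wq, &example_reclaim_work);
	return 0;
}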
2003 
2004 struct wq_barrier {
2005 	struct work_struct	work;
2006 	struct completion	done;
2007 };
2008 
2009 static void wq_barrier_func(struct work_struct *work)
2010 {
2011 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2012 	complete(&barr->done);
2013 }
2014 
2015 /**
2016  * insert_wq_barrier - insert a barrier work
2017  * @cwq: cwq to insert barrier into
2018  * @barr: wq_barrier to insert
2019  * @target: target work to attach @barr to
2020  * @worker: worker currently executing @target, NULL if @target is not executing
2021  *
2022  * @barr is linked to @target such that @barr is completed only after
2023  * @target finishes execution.  Please note that the ordering
2024  * guarantee is observed only with respect to @target and on the local
2025  * cpu.
2026  *
2027  * Currently, a queued barrier can't be canceled.  This is because
2028  * try_to_grab_pending() can't determine whether the work to be
2029  * grabbed is at the head of the queue, and thus can't safely clear
2030  * the LINKED flag of the previous work: a work with the LINKED flag
2031  * set must always be followed by a valid next work.
2032  *
2033  * Note that when @worker is non-NULL, @target may be modified
2034  * underneath us, so we can't reliably determine cwq from @target.
2035  *
2036  * CONTEXT:
2037  * spin_lock_irq(gcwq->lock).
2038  */
2039 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2040 			      struct wq_barrier *barr,
2041 			      struct work_struct *target, struct worker *worker)
2042 {
2043 	struct list_head *head;
2044 	unsigned int linked = 0;
2045 
2046 	/*
2047 	 * debugobject calls are safe here even with gcwq->lock locked
2048 	 * as we know for sure that this will not trigger any of the
2049 	 * checks and call back into the fixup functions where we
2050 	 * might deadlock.
2051 	 */
2052 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
2053 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2054 	init_completion(&barr->done);
2055 
2056 	/*
2057 	 * If @target is currently being executed, schedule the
2058 	 * barrier to the worker; otherwise, put it after @target.
2059 	 */
2060 	if (worker)
2061 		head = worker->scheduled.next;
2062 	else {
2063 		unsigned long *bits = work_data_bits(target);
2064 
2065 		head = target->entry.next;
2066 		/* there can already be other linked works, inherit and set */
2067 		linked = *bits & WORK_STRUCT_LINKED;
2068 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2069 	}
2070 
2071 	debug_work_activate(&barr->work);
2072 	insert_work(cwq, &barr->work, head,
2073 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2074 }
2075 
2076 /**
2077  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2078  * @wq: workqueue being flushed
2079  * @flush_color: new flush color, < 0 for no-op
2080  * @work_color: new work color, < 0 for no-op
2081  *
2082  * Prepare cwqs for workqueue flushing.
2083  *
2084  * If @flush_color is non-negative, flush_color on all cwqs should be
2085  * -1.  If no cwq has in-flight works of the specified color, all
2086  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2087  * has in-flight works, its cwq->flush_color is set to
2088  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2089  * wakeup logic is armed and %true is returned.
2090  *
2091  * The caller should have initialized @wq->first_flusher prior to
2092  * calling this function with non-negative @flush_color.  If
2093  * @flush_color is negative, no flush color update is done and %false
2094  * is returned.
2095  *
2096  * If @work_color is non-negative, all cwqs should have the same
2097  * work_color which is previous to @work_color and all will be
2098  * advanced to @work_color.
2099  *
2100  * CONTEXT:
2101  * mutex_lock(wq->flush_mutex).
2102  *
2103  * RETURNS:
2104  * %true if @flush_color >= 0 and there's something to flush.  %false
2105  * otherwise.
2106  */
2107 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2108 				      int flush_color, int work_color)
2109 {
2110 	bool wait = false;
2111 	unsigned int cpu;
2112 
2113 	if (flush_color >= 0) {
2114 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2115 		atomic_set(&wq->nr_cwqs_to_flush, 1);
2116 	}
2117 
2118 	for_each_cwq_cpu(cpu, wq) {
2119 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2120 		struct global_cwq *gcwq = cwq->gcwq;
2121 
2122 		spin_lock_irq(&gcwq->lock);
2123 
2124 		if (flush_color >= 0) {
2125 			BUG_ON(cwq->flush_color != -1);
2126 
2127 			if (cwq->nr_in_flight[flush_color]) {
2128 				cwq->flush_color = flush_color;
2129 				atomic_inc(&wq->nr_cwqs_to_flush);
2130 				wait = true;
2131 			}
2132 		}
2133 
2134 		if (work_color >= 0) {
2135 			BUG_ON(work_color != work_next_color(cwq->work_color));
2136 			cwq->work_color = work_color;
2137 		}
2138 
2139 		spin_unlock_irq(&gcwq->lock);
2140 	}
2141 
2142 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2143 		complete(&wq->first_flusher->done);
2144 
2145 	return wait;
2146 }
2147 
2148 /**
2149  * flush_workqueue - ensure that any scheduled work has run to completion.
2150  * @wq: workqueue to flush
2151  *
2152  * Forces execution of the workqueue and blocks until its completion.
2153  * This is typically used in driver shutdown handlers.
2154  *
2155  * We sleep until all works which were queued on entry have been handled,
2156  * but we are not livelocked by new incoming ones.
2157  */
2158 void flush_workqueue(struct workqueue_struct *wq)
2159 {
2160 	struct wq_flusher this_flusher = {
2161 		.list = LIST_HEAD_INIT(this_flusher.list),
2162 		.flush_color = -1,
2163 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2164 	};
2165 	int next_color;
2166 
2167 	lock_map_acquire(&wq->lockdep_map);
2168 	lock_map_release(&wq->lockdep_map);
2169 
2170 	mutex_lock(&wq->flush_mutex);
2171 
2172 	/*
2173 	 * Start-to-wait phase
2174 	 */
2175 	next_color = work_next_color(wq->work_color);
2176 
2177 	if (next_color != wq->flush_color) {
2178 		/*
2179 		 * Color space is not full.  The current work_color
2180 		 * becomes our flush_color and work_color is advanced
2181 		 * by one.
2182 		 */
2183 		BUG_ON(!list_empty(&wq->flusher_overflow));
2184 		this_flusher.flush_color = wq->work_color;
2185 		wq->work_color = next_color;
2186 
2187 		if (!wq->first_flusher) {
2188 			/* no flush in progress, become the first flusher */
2189 			BUG_ON(wq->flush_color != this_flusher.flush_color);
2190 
2191 			wq->first_flusher = &this_flusher;
2192 
2193 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2194 						       wq->work_color)) {
2195 				/* nothing to flush, done */
2196 				wq->flush_color = next_color;
2197 				wq->first_flusher = NULL;
2198 				goto out_unlock;
2199 			}
2200 		} else {
2201 			/* wait in queue */
2202 			BUG_ON(wq->flush_color == this_flusher.flush_color);
2203 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2204 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2205 		}
2206 	} else {
2207 		/*
2208 		 * Oops, color space is full, wait on overflow queue.
2209 		 * The next flush completion will assign us
2210 		 * flush_color and transfer to flusher_queue.
2211 		 */
2212 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2213 	}
2214 
2215 	mutex_unlock(&wq->flush_mutex);
2216 
2217 	wait_for_completion(&this_flusher.done);
2218 
2219 	/*
2220 	 * Wake-up-and-cascade phase
2221 	 *
2222 	 * First flushers are responsible for cascading flushes and
2223 	 * handling overflow.  Non-first flushers can simply return.
2224 	 */
2225 	if (wq->first_flusher != &this_flusher)
2226 		return;
2227 
2228 	mutex_lock(&wq->flush_mutex);
2229 
2230 	/* we might have raced, check again with mutex held */
2231 	if (wq->first_flusher != &this_flusher)
2232 		goto out_unlock;
2233 
2234 	wq->first_flusher = NULL;
2235 
2236 	BUG_ON(!list_empty(&this_flusher.list));
2237 	BUG_ON(wq->flush_color != this_flusher.flush_color);
2238 
2239 	while (true) {
2240 		struct wq_flusher *next, *tmp;
2241 
2242 		/* complete all the flushers sharing the current flush color */
2243 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2244 			if (next->flush_color != wq->flush_color)
2245 				break;
2246 			list_del_init(&next->list);
2247 			complete(&next->done);
2248 		}
2249 
2250 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
2251 		       wq->flush_color != work_next_color(wq->work_color));
2252 
2253 		/* this flush_color is finished, advance by one */
2254 		wq->flush_color = work_next_color(wq->flush_color);
2255 
2256 		/* one color has been freed, handle overflow queue */
2257 		if (!list_empty(&wq->flusher_overflow)) {
2258 			/*
2259 			 * Assign the same color to all overflowed
2260 			 * flushers, advance work_color and append to
2261 			 * flusher_queue.  This is the start-to-wait
2262 			 * phase for these overflowed flushers.
2263 			 */
2264 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2265 				tmp->flush_color = wq->work_color;
2266 
2267 			wq->work_color = work_next_color(wq->work_color);
2268 
2269 			list_splice_tail_init(&wq->flusher_overflow,
2270 					      &wq->flusher_queue);
2271 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2272 		}
2273 
2274 		if (list_empty(&wq->flusher_queue)) {
2275 			BUG_ON(wq->flush_color != wq->work_color);
2276 			break;
2277 		}
2278 
2279 		/*
2280 		 * Need to flush more colors.  Make the next flusher
2281 		 * the new first flusher and arm cwqs.
2282 		 */
2283 		BUG_ON(wq->flush_color == wq->work_color);
2284 		BUG_ON(wq->flush_color != next->flush_color);
2285 
2286 		list_del_init(&next->list);
2287 		wq->first_flusher = next;
2288 
2289 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2290 			break;
2291 
2292 		/*
2293 		 * Meh... this color is already done, clear first
2294 		 * flusher and repeat cascading.
2295 		 */
2296 		wq->first_flusher = NULL;
2297 	}
2298 
2299 out_unlock:
2300 	mutex_unlock(&wq->flush_mutex);
2301 }
2302 EXPORT_SYMBOL_GPL(flush_workqueue);
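
/*
 * Editorial example -- not part of workqueue.c.  Typical shutdown-path
 * usage of flush_workqueue(): stop queueing first, then flush, then
 * destroy.  "example_dev" and its members are hypothetical.
 */
struct example_dev {
	struct workqueue_struct	*wq;
	bool			dying;
};

static void example_dev_shutdown(struct example_dev *dev)
{
	dev->dying = true;		/* queueing paths check this flag */
	flush_workqueue(dev->wq);	/* wait for works queued up to here */
	destroy_workqueue(dev->wq);
}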
2303 
2304 /**
2305  * flush_work - block until a work_struct's callback has terminated
2306  * @work: the work which is to be flushed
2307  *
2308  * Returns false if @work has already terminated.
2309  *
2310  * It is expected that, prior to calling flush_work(), the caller has
2311  * arranged for the work to not be requeued; otherwise it doesn't make
2312  * sense to use this function.
2313  */
2314 int flush_work(struct work_struct *work)
2315 {
2316 	struct worker *worker = NULL;
2317 	struct global_cwq *gcwq;
2318 	struct cpu_workqueue_struct *cwq;
2319 	struct wq_barrier barr;
2320 
2321 	might_sleep();
2322 	gcwq = get_work_gcwq(work);
2323 	if (!gcwq)
2324 		return 0;
2325 
2326 	spin_lock_irq(&gcwq->lock);
2327 	if (!list_empty(&work->entry)) {
2328 		/*
2329 		 * See the comment near try_to_grab_pending()->smp_rmb().
2330 		 * If it was re-queued to a different gcwq under us, we
2331 		 * are not going to wait.
2332 		 */
2333 		smp_rmb();
2334 		cwq = get_work_cwq(work);
2335 		if (unlikely(!cwq || gcwq != cwq->gcwq))
2336 			goto already_gone;
2337 	} else {
2338 		worker = find_worker_executing_work(gcwq, work);
2339 		if (!worker)
2340 			goto already_gone;
2341 		cwq = worker->current_cwq;
2342 	}
2343 
2344 	insert_wq_barrier(cwq, &barr, work, worker);
2345 	spin_unlock_irq(&gcwq->lock);
2346 
2347 	lock_map_acquire(&cwq->wq->lockdep_map);
2348 	lock_map_release(&cwq->wq->lockdep_map);
2349 
2350 	wait_for_completion(&barr.done);
2351 	destroy_work_on_stack(&barr.work);
2352 	return 1;
2353 already_gone:
2354 	spin_unlock_irq(&gcwq->lock);
2355 	return 0;
2356 }
2357 EXPORT_SYMBOL_GPL(flush_work);
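
/*
 * Editorial example -- not part of workqueue.c.  flush_work() is only
 * meaningful once requeueing has been prevented; here a stop flag that
 * the (hypothetical) work function checks serves that purpose.
 */
struct example_poller {
	bool			stop;
	struct work_struct	work;
};

static void example_poller_quiesce(struct example_poller *p)
{
	p->stop = true;		/* the work fn must not requeue after this */
	if (!flush_work(&p->work))
		pr_debug("poller work had already finished\n");
}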
2358 
2359 /*
2360  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2361  * so this work can't be re-armed in any way.
2362  */
2363 static int try_to_grab_pending(struct work_struct *work)
2364 {
2365 	struct global_cwq *gcwq;
2366 	int ret = -1;
2367 
2368 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2369 		return 0;
2370 
2371 	/*
2372 	 * The queueing is in progress, or it is already queued. Try to
2373 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2374 	 */
2375 	gcwq = get_work_gcwq(work);
2376 	if (!gcwq)
2377 		return ret;
2378 
2379 	spin_lock_irq(&gcwq->lock);
2380 	if (!list_empty(&work->entry)) {
2381 		/*
2382 		 * This work is queued, but perhaps we locked the wrong gcwq.
2383 		 * In that case we must see the new value after rmb(), see
2384 		 * insert_work()->wmb().
2385 		 */
2386 		smp_rmb();
2387 		if (gcwq == get_work_gcwq(work)) {
2388 			debug_work_deactivate(work);
2389 			list_del_init(&work->entry);
2390 			cwq_dec_nr_in_flight(get_work_cwq(work),
2391 					     get_work_color(work));
2392 			ret = 1;
2393 		}
2394 	}
2395 	spin_unlock_irq(&gcwq->lock);
2396 
2397 	return ret;
2398 }
2399 
2400 static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2401 {
2402 	struct wq_barrier barr;
2403 	struct worker *worker;
2404 
2405 	spin_lock_irq(&gcwq->lock);
2406 
2407 	worker = find_worker_executing_work(gcwq, work);
2408 	if (unlikely(worker))
2409 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2410 
2411 	spin_unlock_irq(&gcwq->lock);
2412 
2413 	if (unlikely(worker)) {
2414 		wait_for_completion(&barr.done);
2415 		destroy_work_on_stack(&barr.work);
2416 	}
2417 }
2418 
2419 static void wait_on_work(struct work_struct *work)
2420 {
2421 	int cpu;
2422 
2423 	might_sleep();
2424 
2425 	lock_map_acquire(&work->lockdep_map);
2426 	lock_map_release(&work->lockdep_map);
2427 
2428 	for_each_gcwq_cpu(cpu)
2429 		wait_on_cpu_work(get_gcwq(cpu), work);
2430 }
2431 
2432 static int __cancel_work_timer(struct work_struct *work,
2433 				struct timer_list* timer)
2434 {
2435 	int ret;
2436 
2437 	do {
2438 		ret = (timer && likely(del_timer(timer)));
2439 		if (!ret)
2440 			ret = try_to_grab_pending(work);
2441 		wait_on_work(work);
2442 	} while (unlikely(ret < 0));
2443 
2444 	clear_work_data(work);
2445 	return ret;
2446 }
2447 
2448 /**
2449  * cancel_work_sync - block until a work_struct's callback has terminated
2450  * @work: the work which is to be flushed
2451  *
2452  * Returns true if @work was pending.
2453  *
2454  * cancel_work_sync() will cancel the work if it is queued. If the work's
2455  * callback appears to be running, cancel_work_sync() will block until it
2456  * has completed.
2457  *
2458  * It is possible to use this function if the work re-queues itself. It can
2459  * cancel the work even if it migrates to another workqueue; however, in that
2460  * case it only guarantees that work->func() has completed on the last queued
2461  * workqueue.
2462  *
2463  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
2464  * pending, otherwise it goes into a busy-wait loop until the timer expires.
2465  *
2466  * The caller must ensure that workqueue_struct on which this work was last
2467  * queued can't be destroyed before this function returns.
2468  */
2469 int cancel_work_sync(struct work_struct *work)
2470 {
2471 	return __cancel_work_timer(work, NULL);
2472 }
2473 EXPORT_SYMBOL_GPL(cancel_work_sync);
2474 
2475 /**
2476  * cancel_delayed_work_sync - reliably kill off a delayed work.
2477  * @dwork: the delayed work struct
2478  *
2479  * Returns true if @dwork was pending.
2480  *
2481  * It is possible to use this function if @dwork rearms itself via queue_work()
2482  * or queue_delayed_work(). See also the comment for cancel_work_sync().
2483  */
2484 int cancel_delayed_work_sync(struct delayed_work *dwork)
2485 {
2486 	return __cancel_work_timer(&dwork->work, &dwork->timer);
2487 }
2488 EXPORT_SYMBOL(cancel_delayed_work_sync);
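
/*
 * Editorial example -- not part of workqueue.c.  A self-rearming poll
 * work and its teardown; cancel_delayed_work_sync() copes with both the
 * pending timer and a concurrently running callback.  Names are
 * hypothetical.
 */
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_dwork, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* ... sample state ..., then rearm for one second later */
	schedule_delayed_work(&example_poll_dwork, HZ);
}

static void example_poll_stop(void)
{
	/* safe against the rearming done in example_poll_fn() */
	cancel_delayed_work_sync(&example_poll_dwork);
}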
2489 
2490 /**
2491  * schedule_work - put work task in global workqueue
2492  * @work: job to be done
2493  *
2494  * Returns zero if @work was already on the kernel-global workqueue and
2495  * non-zero otherwise.
2496  *
2497  * This puts a job in the kernel-global workqueue if it was not already
2498  * queued and leaves it in the same position on the kernel-global
2499  * workqueue otherwise.
2500  */
2501 int schedule_work(struct work_struct *work)
2502 {
2503 	return queue_work(system_wq, work);
2504 }
2505 EXPORT_SYMBOL(schedule_work);
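
/*
 * Editorial example -- not part of workqueue.c.  The common pattern of
 * deferring from hardirq context to the kernel-global workqueue.
 * Assumes <linux/interrupt.h> for irqreturn_t; names are hypothetical.
 */
static void example_bh_fn(struct work_struct *work)
{
	/* runs later, in process context, on system_wq */
}
static DECLARE_WORK(example_bh_work, example_bh_fn);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	schedule_work(&example_bh_work);	/* safe from hardirq */
	return IRQ_HANDLED;
}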
2506 
2507 /**
2508  * schedule_work_on - put work task on a specific cpu
2509  * @cpu: cpu to put the work task on
2510  * @work: job to be done
2511  *
2512  * This puts a job on a specific cpu.
2513  */
2514 int schedule_work_on(int cpu, struct work_struct *work)
2515 {
2516 	return queue_work_on(cpu, system_wq, work);
2517 }
2518 EXPORT_SYMBOL(schedule_work_on);
2519 
2520 /**
2521  * schedule_delayed_work - put work task in global workqueue after delay
2522  * @dwork: job to be done
2523  * @delay: number of jiffies to wait or 0 for immediate execution
2524  *
2525  * After waiting for a given time this puts a job in the kernel-global
2526  * workqueue.
2527  */
2528 int schedule_delayed_work(struct delayed_work *dwork,
2529 					unsigned long delay)
2530 {
2531 	return queue_delayed_work(system_wq, dwork, delay);
2532 }
2533 EXPORT_SYMBOL(schedule_delayed_work);
2534 
2535 /**
2536  * flush_delayed_work - block until a dwork_struct's callback has terminated
2537  * @dwork: the delayed work which is to be flushed
2538  *
2539  * Any timeout is cancelled, and any pending work is run immediately.
2540  */
2541 void flush_delayed_work(struct delayed_work *dwork)
2542 {
2543 	if (del_timer_sync(&dwork->timer)) {
2544 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
2545 			     &dwork->work);
2546 		put_cpu();
2547 	}
2548 	flush_work(&dwork->work);
2549 }
2550 EXPORT_SYMBOL(flush_delayed_work);
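
/*
 * Editorial example -- not part of workqueue.c.  Forcing a deferred
 * update to happen now, e.g. before reading back the state it computes;
 * a still-pending timer is converted into an immediate run.
 */
static void example_sync_update(struct delayed_work *update_dwork)
{
	flush_delayed_work(update_dwork);
	/* any previously queued update has completed by this point */
}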
2551 
2552 /**
2553  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2554  * @cpu: cpu to use
2555  * @dwork: job to be done
2556  * @delay: number of jiffies to wait
2557  *
2558  * After waiting for a given time this puts a job in the kernel-global
2559  * workqueue on the specified CPU.
2560  */
2561 int schedule_delayed_work_on(int cpu,
2562 			struct delayed_work *dwork, unsigned long delay)
2563 {
2564 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2565 }
2566 EXPORT_SYMBOL(schedule_delayed_work_on);
2567 
2568 /**
2569  * schedule_on_each_cpu - call a function on each online CPU from keventd
2570  * @func: the function to call
2571  *
2572  * Returns zero on success.
2573  * Returns a negative errno on failure.
2574  *
2575  * schedule_on_each_cpu() is very slow.
2576  */
2577 int schedule_on_each_cpu(work_func_t func)
2578 {
2579 	int cpu;
2580 	struct work_struct __percpu *works;
2581 
2582 	works = alloc_percpu(struct work_struct);
2583 	if (!works)
2584 		return -ENOMEM;
2585 
2586 	get_online_cpus();
2587 
2588 	for_each_online_cpu(cpu) {
2589 		struct work_struct *work = per_cpu_ptr(works, cpu);
2590 
2591 		INIT_WORK(work, func);
2592 		schedule_work_on(cpu, work);
2593 	}
2594 
2595 	for_each_online_cpu(cpu)
2596 		flush_work(per_cpu_ptr(works, cpu));
2597 
2598 	put_online_cpus();
2599 	free_percpu(works);
2600 	return 0;
2601 }
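
/*
 * Editorial example -- not part of workqueue.c.  Draining a per-cpu
 * cache on every online cpu; schedule_on_each_cpu() returns only after
 * all instances have run.  Names are hypothetical.
 */
static void example_drain_fn(struct work_struct *dummy)
{
	/* runs once on each online cpu, in process context */
}

static int example_drain_all(void)
{
	return schedule_on_each_cpu(example_drain_fn);	/* 0 or -ENOMEM */
}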
2602 
2603 /**
2604  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2605  *
2606  * Forces execution of the kernel-global workqueue and blocks until its
2607  * completion.
2608  *
2609  * Think twice before calling this function!  It's very easy to get into
2610  * trouble if you don't take great care.  Either of the following situations
2611  * will lead to deadlock:
2612  *
2613  *	One of the work items currently on the workqueue needs to acquire
2614  *	a lock held by your code or its caller.
2615  *
2616  *	Your code is running in the context of a work routine.
2617  *
2618  * They will be detected by lockdep when they occur, but the first might not
2619  * occur very often.  It depends on what work items are on the workqueue and
2620  * what locks they need, which you have no control over.
2621  *
2622  * In most situations flushing the entire workqueue is overkill; you merely
2623  * need to know that a particular work item isn't queued and isn't running.
2624  * In such cases you should use cancel_delayed_work_sync() or
2625  * cancel_work_sync() instead.
2626  */
2627 void flush_scheduled_work(void)
2628 {
2629 	flush_workqueue(system_wq);
2630 }
2631 EXPORT_SYMBOL(flush_scheduled_work);
2632 
2633 /**
2634  * execute_in_process_context - reliably execute the routine with user context
2635  * @fn:		the function to execute
2636  * @ew:		guaranteed storage for the execute work structure (must
2637  *		be available when the work executes)
2638  *
2639  * Executes the function immediately if process context is available,
2640  * otherwise schedules the function for delayed execution.
2641  *
2642  * Returns:	0 - function was executed
2643  *		1 - function was scheduled for execution
2644  */
2645 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2646 {
2647 	if (!in_interrupt()) {
2648 		fn(&ew->work);
2649 		return 0;
2650 	}
2651 
2652 	INIT_WORK(&ew->work, fn);
2653 	schedule_work(&ew->work);
2654 
2655 	return 1;
2656 }
2657 EXPORT_SYMBOL_GPL(execute_in_process_context);
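
/*
 * Editorial example -- not part of workqueue.c.  A release path that
 * may be entered from interrupt context; @ew lives inside the object
 * so its storage survives until the deferred work runs.  Names are
 * hypothetical.
 */
struct example_obj {
	struct execute_work	ew;
	/* ... payload ... */
};

static void example_obj_free(struct work_struct *work)
{
	kfree(container_of(work, struct example_obj, ew.work));
}

static void example_obj_destroy(struct example_obj *obj)
{
	/* frees right away in process context, else via schedule_work() */
	execute_in_process_context(example_obj_free, &obj->ew);
}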
2658 
2659 int keventd_up(void)
2660 {
2661 	return system_wq != NULL;
2662 }
2663 
2664 static int alloc_cwqs(struct workqueue_struct *wq)
2665 {
2666 	/*
2667 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
2668 	 * Make sure that the alignment isn't lower than that of
2669 	 * unsigned long long.
2670 	 */
2671 	const size_t size = sizeof(struct cpu_workqueue_struct);
2672 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2673 				   __alignof__(unsigned long long));
2674 #ifdef CONFIG_SMP
2675 	bool percpu = !(wq->flags & WQ_UNBOUND);
2676 #else
2677 	bool percpu = false;
2678 #endif
2679 
2680 	if (percpu)
2681 		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2682 	else {
2683 		void *ptr;
2684 
2685 		/*
2686 		 * Allocate enough room to align cwq and put an extra
2687 		 * pointer at the end pointing back to the originally
2688 		 * allocated pointer, which will be used for freeing.
2689 		 */
2690 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2691 		if (ptr) {
2692 			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2693 			*(void **)(wq->cpu_wq.single + 1) = ptr;
2694 		}
2695 	}
2696 
2697 	/* just in case, make sure it's actually aligned */
2698 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2699 	return wq->cpu_wq.v ? 0 : -ENOMEM;
2700 }
2701 
2702 static void free_cwqs(struct workqueue_struct *wq)
2703 {
2704 #ifdef CONFIG_SMP
2705 	bool percpu = !(wq->flags & WQ_UNBOUND);
2706 #else
2707 	bool percpu = false;
2708 #endif
2709 
2710 	if (percpu)
2711 		free_percpu(wq->cpu_wq.pcpu);
2712 	else if (wq->cpu_wq.single) {
2713 		/* the pointer to free is stored right after the cwq */
2714 		kfree(*(void **)(wq->cpu_wq.single + 1));
2715 	}
2716 }
2717 
2718 static int wq_clamp_max_active(int max_active, unsigned int flags,
2719 			       const char *name)
2720 {
2721 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2722 
2723 	if (max_active < 1 || max_active > lim)
2724 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2725 		       "is out of range, clamping between %d and %d\n",
2726 		       max_active, name, 1, lim);
2727 
2728 	return clamp_val(max_active, 1, lim);
2729 }
2730 
2731 struct workqueue_struct *__alloc_workqueue_key(const char *name,
2732 					       unsigned int flags,
2733 					       int max_active,
2734 					       struct lock_class_key *key,
2735 					       const char *lock_name)
2736 {
2737 	struct workqueue_struct *wq;
2738 	unsigned int cpu;
2739 
2740 	/*
2741 	 * Unbound workqueues aren't concurrency managed and should be
2742 	 * dispatched to workers immediately.
2743 	 */
2744 	if (flags & WQ_UNBOUND)
2745 		flags |= WQ_HIGHPRI;
2746 
2747 	max_active = max_active ?: WQ_DFL_ACTIVE;
2748 	max_active = wq_clamp_max_active(max_active, flags, name);
2749 
2750 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2751 	if (!wq)
2752 		goto err;
2753 
2754 	wq->flags = flags;
2755 	wq->saved_max_active = max_active;
2756 	mutex_init(&wq->flush_mutex);
2757 	atomic_set(&wq->nr_cwqs_to_flush, 0);
2758 	INIT_LIST_HEAD(&wq->flusher_queue);
2759 	INIT_LIST_HEAD(&wq->flusher_overflow);
2760 
2761 	wq->name = name;
2762 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2763 	INIT_LIST_HEAD(&wq->list);
2764 
2765 	if (alloc_cwqs(wq) < 0)
2766 		goto err;
2767 
2768 	for_each_cwq_cpu(cpu, wq) {
2769 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2770 		struct global_cwq *gcwq = get_gcwq(cpu);
2771 
2772 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
2773 		cwq->gcwq = gcwq;
2774 		cwq->wq = wq;
2775 		cwq->flush_color = -1;
2776 		cwq->max_active = max_active;
2777 		INIT_LIST_HEAD(&cwq->delayed_works);
2778 	}
2779 
2780 	if (flags & WQ_RESCUER) {
2781 		struct worker *rescuer;
2782 
2783 		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2784 			goto err;
2785 
2786 		wq->rescuer = rescuer = alloc_worker();
2787 		if (!rescuer)
2788 			goto err;
2789 
2790 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2791 		if (IS_ERR(rescuer->task))
2792 			goto err;
2793 
2794 		wq->rescuer = rescuer;
2795 		rescuer->task->flags |= PF_THREAD_BOUND;
2796 		wake_up_process(rescuer->task);
2797 	}
2798 
2799 	/*
2800 	 * workqueue_lock protects global freeze state and workqueues
2801 	 * list.  Grab it, set max_active accordingly and add the new
2802 	 * workqueue to workqueues list.
2803 	 */
2804 	spin_lock(&workqueue_lock);
2805 
2806 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2807 		for_each_cwq_cpu(cpu, wq)
2808 			get_cwq(cpu, wq)->max_active = 0;
2809 
2810 	list_add(&wq->list, &workqueues);
2811 
2812 	spin_unlock(&workqueue_lock);
2813 
2814 	return wq;
2815 err:
2816 	if (wq) {
2817 		free_cwqs(wq);
2818 		free_mayday_mask(wq->mayday_mask);
2819 		kfree(wq->rescuer);
2820 		kfree(wq);
2821 	}
2822 	return NULL;
2823 }
2824 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
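
/*
 * Editorial example -- not part of workqueue.c.  The alloc_workqueue()
 * macro in <linux/workqueue.h> supplies @key/@lock_name and calls the
 * function above.  max_active = 1 on a bound workqueue limits execution
 * to one work at a time per cpu.  Names are hypothetical.
 */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", WQ_FREEZEABLE, 1);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	/* destroy_workqueue() below flushes remaining works first */
	destroy_workqueue(example_wq);
}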
2825 
2826 /**
2827  * destroy_workqueue - safely terminate a workqueue
2828  * @wq: target workqueue
2829  *
2830  * Safely destroy a workqueue. All work currently pending will be done first.
2831  */
2832 void destroy_workqueue(struct workqueue_struct *wq)
2833 {
2834 	unsigned int cpu;
2835 
2836 	flush_workqueue(wq);
2837 
2838 	/*
2839 	 * The wq list is used to freeze wqs; remove from the list only
2840 	 * after flushing is complete in case a freeze races us.
2841 	 */
2842 	spin_lock(&workqueue_lock);
2843 	list_del(&wq->list);
2844 	spin_unlock(&workqueue_lock);
2845 
2846 	/* sanity check */
2847 	for_each_cwq_cpu(cpu, wq) {
2848 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2849 		int i;
2850 
2851 		for (i = 0; i < WORK_NR_COLORS; i++)
2852 			BUG_ON(cwq->nr_in_flight[i]);
2853 		BUG_ON(cwq->nr_active);
2854 		BUG_ON(!list_empty(&cwq->delayed_works));
2855 	}
2856 
2857 	if (wq->flags & WQ_RESCUER) {
2858 		kthread_stop(wq->rescuer->task);
2859 		free_mayday_mask(wq->mayday_mask);
2860 	}
2861 
2862 	free_cwqs(wq);
2863 	kfree(wq);
2864 }
2865 EXPORT_SYMBOL_GPL(destroy_workqueue);
2866 
2867 /**
2868  * workqueue_set_max_active - adjust max_active of a workqueue
2869  * @wq: target workqueue
2870  * @max_active: new max_active value.
2871  *
2872  * Set max_active of @wq to @max_active.
2873  *
2874  * CONTEXT:
2875  * Don't call from IRQ context.
2876  */
2877 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2878 {
2879 	unsigned int cpu;
2880 
2881 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2882 
2883 	spin_lock(&workqueue_lock);
2884 
2885 	wq->saved_max_active = max_active;
2886 
2887 	for_each_cwq_cpu(cpu, wq) {
2888 		struct global_cwq *gcwq = get_gcwq(cpu);
2889 
2890 		spin_lock_irq(&gcwq->lock);
2891 
2892 		if (!(wq->flags & WQ_FREEZEABLE) ||
2893 		    !(gcwq->flags & GCWQ_FREEZING))
2894 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
2895 
2896 		spin_unlock_irq(&gcwq->lock);
2897 	}
2898 
2899 	spin_unlock(&workqueue_lock);
2900 }
2901 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2902 
2903 /**
2904  * workqueue_congested - test whether a workqueue is congested
2905  * @cpu: CPU in question
2906  * @wq: target workqueue
2907  *
2908  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
2909  * no synchronization around this function and the test result is
2910  * unreliable and only useful as advisory hints or for debugging.
2911  *
2912  * RETURNS:
2913  * %true if congested, %false otherwise.
2914  */
2915 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2916 {
2917 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2918 
2919 	return !list_empty(&cwq->delayed_works);
2920 }
2921 EXPORT_SYMBOL_GPL(workqueue_congested);
2922 
2923 /**
2924  * work_cpu - return the last known associated cpu for @work
2925  * @work: the work of interest
2926  *
2927  * RETURNS:
2928  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
2929  */
2930 unsigned int work_cpu(struct work_struct *work)
2931 {
2932 	struct global_cwq *gcwq = get_work_gcwq(work);
2933 
2934 	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2935 }
2936 EXPORT_SYMBOL_GPL(work_cpu);
2937 
2938 /**
2939  * work_busy - test whether a work is currently pending or running
2940  * @work: the work to be tested
2941  *
2942  * Test whether @work is currently pending or running.  There is no
2943  * synchronization around this function and the test result is
2944  * unreliable and only useful as advisory hints or for debugging.
2945  * Especially for reentrant wqs, the pending state might hide the
2946  * running state.
2947  *
2948  * RETURNS:
2949  * OR'd bitmask of WORK_BUSY_* bits.
2950  */
2951 unsigned int work_busy(struct work_struct *work)
2952 {
2953 	struct global_cwq *gcwq = get_work_gcwq(work);
2954 	unsigned long flags;
2955 	unsigned int ret = 0;
2956 
2957 	if (!gcwq)
2958 		return false;
2959 
2960 	spin_lock_irqsave(&gcwq->lock, flags);
2961 
2962 	if (work_pending(work))
2963 		ret |= WORK_BUSY_PENDING;
2964 	if (find_worker_executing_work(gcwq, work))
2965 		ret |= WORK_BUSY_RUNNING;
2966 
2967 	spin_unlock_irqrestore(&gcwq->lock, flags);
2968 
2969 	return ret;
2970 }
2971 EXPORT_SYMBOL_GPL(work_busy);
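
/*
 * Editorial example -- not part of workqueue.c.  Since the result is
 * advisory only, work_busy() suits debug output rather than
 * synchronization decisions.
 */
static void example_dump_work(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_debug("work %p:%s%s\n", work,
		 busy & WORK_BUSY_PENDING ? " pending" : "",
		 busy & WORK_BUSY_RUNNING ? " running" : "");
}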
2972 
2973 /*
2974  * CPU hotplug.
2975  *
2976  * There are two challenges in supporting CPU hotplug.  Firstly, there
2977  * are a lot of assumptions on strong associations among work, cwq and
2978  * gcwq which make migrating pending and scheduled works very
2979  * difficult to implement without impacting hot paths.  Secondly,
2980  * gcwqs serve a mix of short, long and very long running works, making
2981  * blocked draining impractical.
2982  *
2983  * This is solved by allowing a gcwq to be detached from CPU, running
2984  * it with unbound (rogue) workers and allowing it to be reattached
2985  * later if the cpu comes back online.  A separate thread is created
2986  * to govern a gcwq in such state and is called the trustee of the
2987  * gcwq.
2988  *
2989  * Trustee states and their descriptions.
2990  *
2991  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2992  *		new trustee is started with this state.
2993  *
2994  * IN_CHARGE	Once started, trustee will enter this state after
2995  *		assuming the manager role and making all existing
2996  *		workers rogue.  DOWN_PREPARE waits for trustee to
2997  *		enter this state.  After reaching IN_CHARGE, trustee
2998  *		tries to execute the pending worklist until it's empty
2999  *		and the state is set to BUTCHER, or the state is set
3000  *		to RELEASE.
3001  *
3002  * BUTCHER	Command state which is set by the cpu callback after
3003  *		the cpu has gone down.  Once this state is set, trustee
3004  *		knows that there will be no new works on the worklist
3005  *		and once the worklist is empty it can proceed to
3006  *		killing idle workers.
3007  *
3008  * RELEASE	Command state which is set by the cpu callback if the
3009  *		cpu down has been canceled or it has come online
3010  *		again.  After recognizing this state, trustee stops
3011  *		trying to drain or butcher and clears ROGUE, rebinds
3012  *		all remaining workers back to the cpu and releases
3013  *		manager role.
3014  *
3015  * DONE		Trustee will enter this state after BUTCHER or RELEASE
3016  *		is complete.
3017  *
3018  *          trustee                 CPU                draining
3019  *         took over                down               complete
3020  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3021  *                        |                     |                  ^
3022  *                        | CPU is back online  v   return workers |
3023  *                         ----------------> RELEASE --------------
3024  */
3025 
3026 /**
3027  * trustee_wait_event_timeout - timed event wait for trustee
3028  * @cond: condition to wait for
3029  * @timeout: timeout in jiffies
3030  *
3031  * wait_event_timeout() for trustee to use.  Handles locking and
3032  * checks for RELEASE request.
3033  *
3034  * CONTEXT:
3035  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3036  * multiple times.  To be used by trustee.
3037  *
3038  * RETURNS:
3039  * Positive indicating left time if @cond is satisfied, 0 if timed
3040  * out, -1 if canceled.
3041  */
3042 #define trustee_wait_event_timeout(cond, timeout) ({			\
3043 	long __ret = (timeout);						\
3044 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3045 	       __ret) {							\
3046 		spin_unlock_irq(&gcwq->lock);				\
3047 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3048 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3049 			__ret);						\
3050 		spin_lock_irq(&gcwq->lock);				\
3051 	}								\
3052 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3053 })
3054 
3055 /**
3056  * trustee_wait_event - event wait for trustee
3057  * @cond: condition to wait for
3058  *
3059  * wait_event() for trustee to use.  Automatically handles locking and
3060  * checks for RELEASE request.
3061  *
3062  * CONTEXT:
3063  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3064  * multiple times.  To be used by trustee.
3065  *
3066  * RETURNS:
3067  * 0 if @cond is satisfied, -1 if canceled.
3068  */
3069 #define trustee_wait_event(cond) ({					\
3070 	long __ret1;							\
3071 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3072 	__ret1 < 0 ? -1 : 0;						\
3073 })
3074 
3075 static int __cpuinit trustee_thread(void *__gcwq)
3076 {
3077 	struct global_cwq *gcwq = __gcwq;
3078 	struct worker *worker;
3079 	struct work_struct *work;
3080 	struct hlist_node *pos;
3081 	long rc;
3082 	int i;
3083 
3084 	BUG_ON(gcwq->cpu != smp_processor_id());
3085 
3086 	spin_lock_irq(&gcwq->lock);
3087 	/*
3088 	 * Claim the manager position and make all workers rogue.
3089 	 * Trustee must be bound to the target cpu and can't be
3090 	 * cancelled.
3091 	 */
3092 	BUG_ON(gcwq->cpu != smp_processor_id());
3093 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3094 	BUG_ON(rc < 0);
3095 
3096 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3097 
3098 	list_for_each_entry(worker, &gcwq->idle_list, entry)
3099 		worker->flags |= WORKER_ROGUE;
3100 
3101 	for_each_busy_worker(worker, i, pos, gcwq)
3102 		worker->flags |= WORKER_ROGUE;
3103 
3104 	/*
3105 	 * Call schedule() so that we cross rq->lock and thus can
3106 	 * guarantee sched callbacks see the rogue flag.  This is
3107 	 * necessary as scheduler callbacks may be invoked from other
3108 	 * cpus.
3109 	 */
3110 	spin_unlock_irq(&gcwq->lock);
3111 	schedule();
3112 	spin_lock_irq(&gcwq->lock);
3113 
3114 	/*
3115 	 * Sched callbacks are disabled now.  Zap nr_running.  After
3116 	 * this, nr_running stays zero and need_more_worker() and
3117 	 * keep_working() are always true as long as the worklist is
3118 	 * not empty.
3119 	 */
3120 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3121 
3122 	spin_unlock_irq(&gcwq->lock);
3123 	del_timer_sync(&gcwq->idle_timer);
3124 	spin_lock_irq(&gcwq->lock);
3125 
3126 	/*
3127 	 * We're now in charge.  Notify and proceed to drain.  We need
3128 	 * to keep the gcwq running during the whole CPU down
3129 	 * procedure as other cpu hotunplug callbacks may need to
3130 	 * flush currently running tasks.
3131 	 */
3132 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3133 	wake_up_all(&gcwq->trustee_wait);
3134 
3135 	/*
3136 	 * The original cpu is in the process of dying and may go away
3137 	 * anytime now.  When that happens, we and all workers would
3138 	 * be migrated to other cpus.  Try draining any left work.  We
3139 	 * want to get it over with ASAP - spam rescuers, wake up as
3140 	 * many idlers as necessary and create new ones till the
3141 	 * worklist is empty.  Note that if the gcwq is frozen, there
3142 	 * may be frozen works in freezeable cwqs.  Don't declare
3143 	 * completion while frozen.
3144 	 */
3145 	while (gcwq->nr_workers != gcwq->nr_idle ||
3146 	       gcwq->flags & GCWQ_FREEZING ||
3147 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3148 		int nr_works = 0;
3149 
3150 		list_for_each_entry(work, &gcwq->worklist, entry) {
3151 			send_mayday(work);
3152 			nr_works++;
3153 		}
3154 
3155 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3156 			if (!nr_works--)
3157 				break;
3158 			wake_up_process(worker->task);
3159 		}
3160 
3161 		if (need_to_create_worker(gcwq)) {
3162 			spin_unlock_irq(&gcwq->lock);
3163 			worker = create_worker(gcwq, false);
3164 			spin_lock_irq(&gcwq->lock);
3165 			if (worker) {
3166 				worker->flags |= WORKER_ROGUE;
3167 				start_worker(worker);
3168 			}
3169 		}
3170 
3171 		/* give a breather */
3172 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3173 			break;
3174 	}
3175 
3176 	/*
3177 	 * Either all works have been scheduled and cpu is down, or
3178 	 * cpu down has already been canceled.  Wait for and butcher
3179 	 * all workers till we're canceled.
3180 	 */
3181 	do {
3182 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3183 		while (!list_empty(&gcwq->idle_list))
3184 			destroy_worker(list_first_entry(&gcwq->idle_list,
3185 							struct worker, entry));
3186 	} while (gcwq->nr_workers && rc >= 0);
3187 
3188 	/*
3189 	 * At this point, either draining has completed and no worker
3190 	 * is left, or cpu down has been canceled or the cpu is being
3191 	 * brought back up.  There shouldn't be any idle one left.
3192 	 * Tell the remaining busy ones to rebind once they finish their
3193 	 * currently scheduled works by scheduling the rebind_work.
3194 	 */
3195 	WARN_ON(!list_empty(&gcwq->idle_list));
3196 
3197 	for_each_busy_worker(worker, i, pos, gcwq) {
3198 		struct work_struct *rebind_work = &worker->rebind_work;
3199 
3200 		/*
3201 		 * Rebind_work may race with future cpu hotplug
3202 		 * operations.  Use a separate flag to mark that
3203 		 * rebinding is scheduled.
3204 		 */
3205 		worker->flags |= WORKER_REBIND;
3206 		worker->flags &= ~WORKER_ROGUE;
3207 
3208 		/* queue rebind_work, wq doesn't matter, use the default one */
3209 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3210 				     work_data_bits(rebind_work)))
3211 			continue;
3212 
3213 		debug_work_activate(rebind_work);
3214 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3215 			    worker->scheduled.next,
3216 			    work_color_to_flags(WORK_NO_COLOR));
3217 	}
3218 
3219 	/* relinquish manager role */
3220 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3221 
3222 	/* notify completion */
3223 	gcwq->trustee = NULL;
3224 	gcwq->trustee_state = TRUSTEE_DONE;
3225 	wake_up_all(&gcwq->trustee_wait);
3226 	spin_unlock_irq(&gcwq->lock);
3227 	return 0;
3228 }
3229 
3230 /**
3231  * wait_trustee_state - wait for trustee to enter the specified state
3232  * @gcwq: gcwq the trustee of interest belongs to
3233  * @state: target state to wait for
3234  *
3235  * Wait for the trustee to reach @state.  DONE is already matched.
3236  *
3237  * CONTEXT:
3238  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3239  * multiple times.  To be used by cpu_callback.
3240  */
3241 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3242 {
3243 	if (!(gcwq->trustee_state == state ||
3244 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3245 		spin_unlock_irq(&gcwq->lock);
3246 		__wait_event(gcwq->trustee_wait,
3247 			     gcwq->trustee_state == state ||
3248 			     gcwq->trustee_state == TRUSTEE_DONE);
3249 		spin_lock_irq(&gcwq->lock);
3250 	}
3251 }
3252 
3253 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3254 						unsigned long action,
3255 						void *hcpu)
3256 {
3257 	unsigned int cpu = (unsigned long)hcpu;
3258 	struct global_cwq *gcwq = get_gcwq(cpu);
3259 	struct task_struct *new_trustee = NULL;
3260 	struct worker *uninitialized_var(new_worker);
3261 	unsigned long flags;
3262 
3263 	action &= ~CPU_TASKS_FROZEN;
3264 
3265 	switch (action) {
3266 	case CPU_DOWN_PREPARE:
3267 		new_trustee = kthread_create(trustee_thread, gcwq,
3268 					     "workqueue_trustee/%d\n", cpu);
3269 		if (IS_ERR(new_trustee))
3270 			return notifier_from_errno(PTR_ERR(new_trustee));
3271 		kthread_bind(new_trustee, cpu);
3272 		/* fall through */
3273 	case CPU_UP_PREPARE:
3274 		BUG_ON(gcwq->first_idle);
3275 		new_worker = create_worker(gcwq, false);
3276 		if (!new_worker) {
3277 			if (new_trustee)
3278 				kthread_stop(new_trustee);
3279 			return NOTIFY_BAD;
3280 		}
3281 	}
3282 
3283 	/* some are called w/ irq disabled, don't disturb irq status */
3284 	spin_lock_irqsave(&gcwq->lock, flags);
3285 
3286 	switch (action) {
3287 	case CPU_DOWN_PREPARE:
3288 		/* initialize trustee and tell it to acquire the gcwq */
3289 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3290 		gcwq->trustee = new_trustee;
3291 		gcwq->trustee_state = TRUSTEE_START;
3292 		wake_up_process(gcwq->trustee);
3293 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3294 		/* fall through */
3295 	case CPU_UP_PREPARE:
3296 		BUG_ON(gcwq->first_idle);
3297 		gcwq->first_idle = new_worker;
3298 		break;
3299 
3300 	case CPU_DYING:
3301 		/*
3302 		 * Before this, the trustee and all workers except for
3303 		 * the ones which are still executing works from
3304 		 * before the last CPU down must be on the cpu.  After
3305 		 * this, they'll all be diasporas.
3306 		 */
3307 		gcwq->flags |= GCWQ_DISASSOCIATED;
3308 		break;
3309 
3310 	case CPU_POST_DEAD:
3311 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3312 		/* fall through */
3313 	case CPU_UP_CANCELED:
3314 		destroy_worker(gcwq->first_idle);
3315 		gcwq->first_idle = NULL;
3316 		break;
3317 
3318 	case CPU_DOWN_FAILED:
3319 	case CPU_ONLINE:
3320 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3321 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3322 			gcwq->trustee_state = TRUSTEE_RELEASE;
3323 			wake_up_process(gcwq->trustee);
3324 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3325 		}
3326 
3327 		/*
3328 		 * Trustee is done and there might be no worker left.
3329 		 * Put the first_idle in and request a real manager to
3330 		 * take a look.
3331 		 */
3332 		spin_unlock_irq(&gcwq->lock);
3333 		kthread_bind(gcwq->first_idle->task, cpu);
3334 		spin_lock_irq(&gcwq->lock);
3335 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3336 		start_worker(gcwq->first_idle);
3337 		gcwq->first_idle = NULL;
3338 		break;
3339 	}
3340 
3341 	spin_unlock_irqrestore(&gcwq->lock, flags);
3342 
3343 	return notifier_from_errno(0);
3344 }
3345 
3346 #ifdef CONFIG_SMP
3347 
3348 struct work_for_cpu {
3349 	struct completion completion;
3350 	long (*fn)(void *);
3351 	void *arg;
3352 	long ret;
3353 };
3354 
3355 static int do_work_for_cpu(void *_wfc)
3356 {
3357 	struct work_for_cpu *wfc = _wfc;
3358 	wfc->ret = wfc->fn(wfc->arg);
3359 	complete(&wfc->completion);
3360 	return 0;
3361 }
3362 
3363 /**
3364  * work_on_cpu - run a function in process context on a particular cpu
3365  * @cpu: the cpu to run on
3366  * @fn: the function to run
3367  * @arg: the function arg
3368  *
3369  * This will return the value @fn returns.
3370  * It is up to the caller to ensure that the cpu doesn't go offline.
3371  * The caller must not hold any locks which would prevent @fn from completing.
3372  */
3373 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3374 {
3375 	struct task_struct *sub_thread;
3376 	struct work_for_cpu wfc = {
3377 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3378 		.fn = fn,
3379 		.arg = arg,
3380 	};
3381 
3382 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3383 	if (IS_ERR(sub_thread))
3384 		return PTR_ERR(sub_thread);
3385 	kthread_bind(sub_thread, cpu);
3386 	wake_up_process(sub_thread);
3387 	wait_for_completion(&wfc.completion);
3388 	return wfc.ret;
3389 }
3390 EXPORT_SYMBOL_GPL(work_on_cpu);
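
/*
 * Editorial example -- not part of workqueue.c.  Running a read on a
 * specific cpu; the caller pins the cpu online around the call.
 * Assumes <linux/cpu.h> for get/put_online_cpus().  Names are
 * hypothetical.
 */
static long example_read_fn(void *arg)
{
	/* executes with smp_processor_id() == the requested cpu */
	return 0;
}

static long example_read_on(unsigned int cpu)
{
	long ret = -ENODEV;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, example_read_fn, NULL);
	put_online_cpus();
	return ret;
}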
3391 #endif /* CONFIG_SMP */
3392 
3393 #ifdef CONFIG_FREEZER
3394 
3395 /**
3396  * freeze_workqueues_begin - begin freezing workqueues
3397  *
3398  * Start freezing workqueues.  After this function returns, all
3399  * freezeable workqueues will queue new works to their
3400  * cwq->delayed_works list instead of gcwq->worklist.
3401  *
3402  * CONTEXT:
3403  * Grabs and releases workqueue_lock and gcwq->lock's.
3404  */
3405 void freeze_workqueues_begin(void)
3406 {
3407 	unsigned int cpu;
3408 
3409 	spin_lock(&workqueue_lock);
3410 
3411 	BUG_ON(workqueue_freezing);
3412 	workqueue_freezing = true;
3413 
3414 	for_each_gcwq_cpu(cpu) {
3415 		struct global_cwq *gcwq = get_gcwq(cpu);
3416 		struct workqueue_struct *wq;
3417 
3418 		spin_lock_irq(&gcwq->lock);
3419 
3420 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3421 		gcwq->flags |= GCWQ_FREEZING;
3422 
3423 		list_for_each_entry(wq, &workqueues, list) {
3424 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3425 
3426 			if (cwq && wq->flags & WQ_FREEZEABLE)
3427 				cwq->max_active = 0;
3428 		}
3429 
3430 		spin_unlock_irq(&gcwq->lock);
3431 	}
3432 
3433 	spin_unlock(&workqueue_lock);
3434 }
3435 
3436 /**
3437  * freeze_workqueues_busy - are freezeable workqueues still busy?
3438  *
3439  * Check whether freezing is complete.  This function must be called
3440  * between freeze_workqueues_begin() and thaw_workqueues().
3441  *
3442  * CONTEXT:
3443  * Grabs and releases workqueue_lock.
3444  *
3445  * RETURNS:
3446  * %true if some freezeable workqueues are still busy.  %false if
3447  * freezing is complete.
3448  */
3449 bool freeze_workqueues_busy(void)
3450 {
3451 	unsigned int cpu;
3452 	bool busy = false;
3453 
3454 	spin_lock(&workqueue_lock);
3455 
3456 	BUG_ON(!workqueue_freezing);
3457 
3458 	for_each_gcwq_cpu(cpu) {
3459 		struct workqueue_struct *wq;
3460 		/*
3461 		 * nr_active is monotonically decreasing.  It's safe
3462 		 * to peek without lock.
3463 		 */
3464 		list_for_each_entry(wq, &workqueues, list) {
3465 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3466 
3467 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3468 				continue;
3469 
3470 			BUG_ON(cwq->nr_active < 0);
3471 			if (cwq->nr_active) {
3472 				busy = true;
3473 				goto out_unlock;
3474 			}
3475 		}
3476 	}
3477 out_unlock:
3478 	spin_unlock(&workqueue_lock);
3479 	return busy;
3480 }
3481 
3482 /**
3483  * thaw_workqueues - thaw workqueues
3484  *
3485  * Thaw workqueues.  Normal queueing is restored and all collected
3486  * frozen works are transferred to their respective gcwq worklists.
3487  *
3488  * CONTEXT:
3489  * Grabs and releases workqueue_lock and gcwq->lock's.
3490  */
3491 void thaw_workqueues(void)
3492 {
3493 	unsigned int cpu;
3494 
3495 	spin_lock(&workqueue_lock);
3496 
3497 	if (!workqueue_freezing)
3498 		goto out_unlock;
3499 
3500 	for_each_gcwq_cpu(cpu) {
3501 		struct global_cwq *gcwq = get_gcwq(cpu);
3502 		struct workqueue_struct *wq;
3503 
3504 		spin_lock_irq(&gcwq->lock);
3505 
3506 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3507 		gcwq->flags &= ~GCWQ_FREEZING;
3508 
3509 		list_for_each_entry(wq, &workqueues, list) {
3510 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3511 
3512 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3513 				continue;
3514 
3515 			/* restore max_active and repopulate worklist */
3516 			cwq->max_active = wq->saved_max_active;
3517 
3518 			while (!list_empty(&cwq->delayed_works) &&
3519 			       cwq->nr_active < cwq->max_active)
3520 				cwq_activate_first_delayed(cwq);
3521 		}
3522 
3523 		wake_up_worker(gcwq);
3524 
3525 		spin_unlock_irq(&gcwq->lock);
3526 	}
3527 
3528 	workqueue_freezing = false;
3529 out_unlock:
3530 	spin_unlock(&workqueue_lock);
3531 }
3532 #endif /* CONFIG_FREEZER */
3533 
3534 static int __init init_workqueues(void)
3535 {
3536 	unsigned int cpu;
3537 	int i;
3538 
3539 	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3540 
3541 	/* initialize gcwqs */
3542 	for_each_gcwq_cpu(cpu) {
3543 		struct global_cwq *gcwq = get_gcwq(cpu);
3544 
3545 		spin_lock_init(&gcwq->lock);
3546 		INIT_LIST_HEAD(&gcwq->worklist);
3547 		gcwq->cpu = cpu;
3548 		if (cpu == WORK_CPU_UNBOUND)
3549 			gcwq->flags |= GCWQ_DISASSOCIATED;
3550 
3551 		INIT_LIST_HEAD(&gcwq->idle_list);
3552 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3553 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3554 
3555 		init_timer_deferrable(&gcwq->idle_timer);
3556 		gcwq->idle_timer.function = idle_worker_timeout;
3557 		gcwq->idle_timer.data = (unsigned long)gcwq;
3558 
3559 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3560 			    (unsigned long)gcwq);
3561 
3562 		ida_init(&gcwq->worker_ida);
3563 
3564 		gcwq->trustee_state = TRUSTEE_DONE;
3565 		init_waitqueue_head(&gcwq->trustee_wait);
3566 	}
3567 
3568 	/* create the initial worker */
3569 	for_each_online_gcwq_cpu(cpu) {
3570 		struct global_cwq *gcwq = get_gcwq(cpu);
3571 		struct worker *worker;
3572 
3573 		worker = create_worker(gcwq, true);
3574 		BUG_ON(!worker);
3575 		spin_lock_irq(&gcwq->lock);
3576 		start_worker(worker);
3577 		spin_unlock_irq(&gcwq->lock);
3578 	}
3579 
3580 	system_wq = alloc_workqueue("events", 0, 0);
3581 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3582 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3583 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3584 					    WQ_UNBOUND_MAX_ACTIVE);
3585 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || !system_unbound_wq);
3586 	return 0;
3587 }
3588 early_initcall(init_workqueues);
3589