xref: /linux/kernel/workqueue.c (revision c28054d4b31d78272f65c0d11db0796f50fb9569)
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-CPU workqueues on the system, so that CPU hotplug can add and
   remove threads to each one as CPUs come and go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes the CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map instead; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
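
/*
 * Note on the encoding used by set_wq_data()/get_wq_data() above: work->data
 * packs the flag bits (e.g. WORK_STRUCT_PENDING) into its low bits and the
 * cpu_workqueue_struct pointer into the remaining bits, which works because
 * the cwq is aligned enough that its low bits are always zero.
 * WORK_STRUCT_FLAG_MASK and WORK_STRUCT_WQ_DATA_MASK (from
 * <linux/workqueue.h>) select the two halves of that word.
 */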

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
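
/*
 * Usage sketch (not part of this file): a minimal example of how a caller
 * might use queue_work() on a private workqueue.  The names example_wq,
 * example_work and example_work_fn are hypothetical; create_workqueue() is
 * the wrapper from <linux/workqueue.h> that resolves to __create_workqueue()
 * below.
 */
#if 0
static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* runs in process context in the worker thread */
}

static int example_init(void)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);	/* nonzero unless already pending */
	return 0;
}
#endif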

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
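
/*
 * Usage sketch (not part of this file): arming a delayed work item.  The
 * names example_dwork and example_timeout_fn are hypothetical;
 * INIT_DELAYED_WORK() and msecs_to_jiffies() come from the usual kernel
 * headers, and schedule_delayed_work() (defined later in this file) queues
 * onto the global keventd workqueue.
 */
#if 0
static struct delayed_work example_dwork;

static void example_timeout_fn(struct work_struct *work)
{
	/* runs roughly 100ms after queueing, in process context */
}

static void example_arm(void)
{
	INIT_DELAYED_WORK(&example_dwork, example_timeout_fn);
	schedule_delayed_work(&example_dwork, msecs_to_jiffies(100));
}
#endif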

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
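
/*
 * Usage sketch (not part of this file): flushing a queue before tearing down
 * data that a pending work item might still touch.  example_flush_wq and
 * example_buf are hypothetical.
 */
#if 0
static struct workqueue_struct *example_flush_wq;
static void *example_buf;

static void example_shutdown(void)
{
	/* all work queued on example_flush_wq before this call has now finished */
	flush_workqueue(example_flush_wq);
	kfree(example_buf);
	example_buf = NULL;
}
#endif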

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
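
/*
 * Usage sketch (not part of this file): typical teardown of a self-rearming
 * delayed work item.  example_poll_dwork and example_poll_fn are
 * hypothetical.
 */
#if 0
static struct delayed_work example_poll_dwork;

static void example_poll_fn(struct work_struct *work)
{
	/* ... do the periodic work, then rearm ... */
	schedule_delayed_work(&example_poll_dwork, HZ);
}

static void example_teardown(void)
{
	/*
	 * Cancels the pending timer/work and waits for a running callback;
	 * per the kerneldoc above, the rearm in example_poll_fn() cannot
	 * race the work back in once this returns.
	 */
	cancel_delayed_work_sync(&example_poll_dwork);
}
#endif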

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
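
/*
 * Usage sketch (not part of this file): running a cleanup routine that needs
 * process context from a path that may be called in interrupt context.  The
 * names struct example_dev, example_release and example_put are hypothetical;
 * the execute_work storage must stay valid until the callback has run.
 */
#if 0
struct example_dev {
	struct execute_work	ew;
	/* ... */
};

static void example_release(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, ew.work);

	kfree(dev);		/* safe: we are guaranteed process context here */
}

static void example_put(struct example_dev *dev)
{
	/* runs example_release() now, or defers it to keventd if in_interrupt() */
	execute_in_process_context(example_release, &dev->ew);
}
#endif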

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
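
/*
 * Callers normally reach __create_workqueue() through the wrapper macros in
 * <linux/workqueue.h> rather than directly: create_workqueue(name) for a
 * per-CPU queue, create_singlethread_workqueue(name) for a single worker
 * thread, and create_freezeable_workqueue(name) for a freezeable
 * single-threaded queue (macro names as of this kernel era).  The returned
 * workqueue is paired with destroy_workqueue() below.
 */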

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
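			/* fall through: clean up the thread we just started */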
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}