xref: /linux/kernel/workqueue.c (revision 913df4453f85f1fe79b35ecf3c9a0c0b707d22a2)
1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter.
17  */
18 
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/workqueue.h>
38 
39 /*
40  * The per-CPU workqueue (if single-threaded, we always use the first
41  * possible CPU).
42  */
43 struct cpu_workqueue_struct {
44 
45 	spinlock_t lock;
46 
47 	struct list_head worklist;
48 	wait_queue_head_t more_work;
49 	struct work_struct *current_work;
50 
51 	struct workqueue_struct *wq;
52 	struct task_struct *thread;
53 } ____cacheline_aligned;
54 
55 /*
56  * The externally visible workqueue abstraction is an array of
57  * per-CPU workqueues:
58  */
59 struct workqueue_struct {
60 	struct cpu_workqueue_struct *cpu_wq;
61 	struct list_head list;
62 	const char *name;
63 	int singlethread;
64 	int freezeable;		/* Freeze threads during suspend */
65 	int rt;
66 #ifdef CONFIG_LOCKDEP
67 	struct lockdep_map lockdep_map;
68 #endif
69 };
70 
71 /* Serializes the accesses to the list of workqueues. */
72 static DEFINE_SPINLOCK(workqueue_lock);
73 static LIST_HEAD(workqueues);
74 
75 static int singlethread_cpu __read_mostly;
76 static const struct cpumask *cpu_singlethread_map __read_mostly;
77 /*
78  * _cpu_down() first removes the CPU from cpu_online_map, and only then
79  * does CPU_DEAD flush cwq->worklist. This means that a flush_workqueue()
80  * or wait_on_work() that comes in between can't use for_each_online_cpu().
81  * We could use cpu_possible_map; the cpumask below is more documentation
82  * than optimization.
83  */
84 static cpumask_var_t cpu_populated_map __read_mostly;
85 
86 /* If it's single threaded, it isn't in the list of workqueues. */
87 static inline int is_wq_single_threaded(struct workqueue_struct *wq)
88 {
89 	return wq->singlethread;
90 }
91 
92 static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
93 {
94 	return is_wq_single_threaded(wq)
95 		? cpu_singlethread_map : cpu_populated_map;
96 }
97 
98 static
99 struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
100 {
101 	if (unlikely(is_wq_single_threaded(wq)))
102 		cpu = singlethread_cpu;
103 	return per_cpu_ptr(wq->cpu_wq, cpu);
104 }
105 
106 /*
107  * Set the workqueue on which a work item is to be run
108  * - Must *only* be called if the pending flag is set
109  */
110 static inline void set_wq_data(struct work_struct *work,
111 				struct cpu_workqueue_struct *cwq)
112 {
113 	unsigned long new;
114 
115 	BUG_ON(!work_pending(work));
116 
117 	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
118 	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
119 	atomic_long_set(&work->data, new);
120 }
121 
122 static inline
123 struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
124 {
125 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
126 }
127 
128 static void insert_work(struct cpu_workqueue_struct *cwq,
129 			struct work_struct *work, struct list_head *head)
130 {
131 	trace_workqueue_insertion(cwq->thread, work);
132 
133 	set_wq_data(work, cwq);
134 	/*
135 	 * Ensure that we get the right work->data if we see the
136 	 * result of list_add() below, see try_to_grab_pending().
137 	 */
138 	smp_wmb();
139 	list_add_tail(&work->entry, head);
140 	wake_up(&cwq->more_work);
141 }
142 
143 static void __queue_work(struct cpu_workqueue_struct *cwq,
144 			 struct work_struct *work)
145 {
146 	unsigned long flags;
147 
148 	spin_lock_irqsave(&cwq->lock, flags);
149 	insert_work(cwq, work, &cwq->worklist);
150 	spin_unlock_irqrestore(&cwq->lock, flags);
151 }
152 
153 /**
154  * queue_work - queue work on a workqueue
155  * @wq: workqueue to use
156  * @work: work to queue
157  *
158  * Returns 0 if @work was already on a queue, non-zero otherwise.
159  *
160  * We queue the work to the CPU on which it was submitted, but if the CPU dies
161  * it can be processed by another CPU.
162  */
163 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
164 {
165 	int ret;
166 
167 	ret = queue_work_on(get_cpu(), wq, work);
168 	put_cpu();
169 
170 	return ret;
171 }
172 EXPORT_SYMBOL_GPL(queue_work);
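
/*
 * Illustrative usage sketch (the identifiers my_wq, my_work and
 * my_work_func are hypothetical): a typical caller allocates its own
 * workqueue and queues work items to it.
 */
#if 0
static struct workqueue_struct *my_wq;

static void my_work_func(struct work_struct *work)
{
	/* runs in process context in my_wq's worker thread */
}

static DECLARE_WORK(my_work, my_work_func);

static int my_init(void)
{
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);	/* 0 if it was already queued */
	return 0;
}
#endif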
173 
174 /**
175  * queue_work_on - queue work on specific cpu
176  * @cpu: CPU number to execute work on
177  * @wq: workqueue to use
178  * @work: work to queue
179  *
180  * Returns 0 if @work was already on a queue, non-zero otherwise.
181  *
182  * We queue the work to a specific CPU; the caller must ensure that
183  * the CPU can't go away.
184  */
185 int
186 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
187 {
188 	int ret = 0;
189 
190 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
191 		BUG_ON(!list_empty(&work->entry));
192 		__queue_work(wq_per_cpu(wq, cpu), work);
193 		ret = 1;
194 	}
195 	return ret;
196 }
197 EXPORT_SYMBOL_GPL(queue_work_on);
198 
199 static void delayed_work_timer_fn(unsigned long __data)
200 {
201 	struct delayed_work *dwork = (struct delayed_work *)__data;
202 	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
203 	struct workqueue_struct *wq = cwq->wq;
204 
205 	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
206 }
207 
208 /**
209  * queue_delayed_work - queue work on a workqueue after delay
210  * @wq: workqueue to use
211  * @dwork: delayable work to queue
212  * @delay: number of jiffies to wait before queueing
213  *
214  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
215  */
216 int queue_delayed_work(struct workqueue_struct *wq,
217 			struct delayed_work *dwork, unsigned long delay)
218 {
219 	if (delay == 0)
220 		return queue_work(wq, &dwork->work);
221 
222 	return queue_delayed_work_on(-1, wq, dwork, delay);
223 }
224 EXPORT_SYMBOL_GPL(queue_delayed_work);
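
/*
 * Illustrative sketch (hypothetical names): a delayed_work pairs a
 * work_struct with a timer, so a handler can re-arm itself to run
 * again after a delay.
 */
#if 0
static void my_poll_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_work, my_poll_func);

static void my_poll_func(struct work_struct *work)
{
	/* ... poll the hardware ..., then run again in about a second */
	queue_delayed_work(my_wq, &my_poll_work, HZ);
}
#endif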
225 
226 /**
227  * queue_delayed_work_on - queue work on specific CPU after delay
228  * @cpu: CPU number to execute work on
229  * @wq: workqueue to use
230  * @dwork: work to queue
231  * @delay: number of jiffies to wait before queueing
232  *
233  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
234  */
235 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
236 			struct delayed_work *dwork, unsigned long delay)
237 {
238 	int ret = 0;
239 	struct timer_list *timer = &dwork->timer;
240 	struct work_struct *work = &dwork->work;
241 
242 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
243 		BUG_ON(timer_pending(timer));
244 		BUG_ON(!list_empty(&work->entry));
245 
246 		timer_stats_timer_set_start_info(&dwork->timer);
247 
248 		/* This stores cwq for the moment, for the timer_fn */
249 		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
250 		timer->expires = jiffies + delay;
251 		timer->data = (unsigned long)dwork;
252 		timer->function = delayed_work_timer_fn;
253 
254 		if (unlikely(cpu >= 0))
255 			add_timer_on(timer, cpu);
256 		else
257 			add_timer(timer);
258 		ret = 1;
259 	}
260 	return ret;
261 }
262 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
263 
264 static void run_workqueue(struct cpu_workqueue_struct *cwq)
265 {
266 	spin_lock_irq(&cwq->lock);
267 	while (!list_empty(&cwq->worklist)) {
268 		struct work_struct *work = list_entry(cwq->worklist.next,
269 						struct work_struct, entry);
270 		work_func_t f = work->func;
271 #ifdef CONFIG_LOCKDEP
272 		/*
273 		 * It is permissible to free the struct work_struct from
274 		 * inside the function that is called from it; lockdep needs
275 		 * to take this into account as well.
276 		 * To avoid bogus "held lock freed" warnings as well
277 		 * as problems when looking into work->lockdep_map,
278 		 * make a copy and use that here.
279 		 */
280 		struct lockdep_map lockdep_map = work->lockdep_map;
281 #endif
282 		trace_workqueue_execution(cwq->thread, work);
283 		cwq->current_work = work;
284 		list_del_init(cwq->worklist.next);
285 		spin_unlock_irq(&cwq->lock);
286 
287 		BUG_ON(get_wq_data(work) != cwq);
288 		work_clear_pending(work);
289 		lock_map_acquire(&cwq->wq->lockdep_map);
290 		lock_map_acquire(&lockdep_map);
291 		f(work);
292 		lock_map_release(&lockdep_map);
293 		lock_map_release(&cwq->wq->lockdep_map);
294 
295 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
296 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
297 					"%s/0x%08x/%d\n",
298 					current->comm, preempt_count(),
299 				       	task_pid_nr(current));
300 			printk(KERN_ERR "    last function: ");
301 			print_symbol("%s\n", (unsigned long)f);
302 			debug_show_held_locks(current);
303 			dump_stack();
304 		}
305 
306 		spin_lock_irq(&cwq->lock);
307 		cwq->current_work = NULL;
308 	}
309 	spin_unlock_irq(&cwq->lock);
310 }
311 
312 static int worker_thread(void *__cwq)
313 {
314 	struct cpu_workqueue_struct *cwq = __cwq;
315 	DEFINE_WAIT(wait);
316 
317 	if (cwq->wq->freezeable)
318 		set_freezable();
319 
320 	for (;;) {
321 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
322 		if (!freezing(current) &&
323 		    !kthread_should_stop() &&
324 		    list_empty(&cwq->worklist))
325 			schedule();
326 		finish_wait(&cwq->more_work, &wait);
327 
328 		try_to_freeze();
329 
330 		if (kthread_should_stop())
331 			break;
332 
333 		run_workqueue(cwq);
334 	}
335 
336 	return 0;
337 }
338 
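/*
 * Flushing is implemented by queueing a wq_barrier work item behind the
 * work to be flushed and sleeping on its completion: once the worker
 * thread reaches the barrier, wq_barrier_func() completes ->done and
 * wakes the flusher.
 */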
339 struct wq_barrier {
340 	struct work_struct	work;
341 	struct completion	done;
342 };
343 
344 static void wq_barrier_func(struct work_struct *work)
345 {
346 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
347 	complete(&barr->done);
348 }
349 
350 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
351 			struct wq_barrier *barr, struct list_head *head)
352 {
353 	INIT_WORK(&barr->work, wq_barrier_func);
354 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
355 
356 	init_completion(&barr->done);
357 
358 	insert_work(cwq, &barr->work, head);
359 }
360 
361 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
362 {
363 	int active = 0;
364 	struct wq_barrier barr;
365 
366 	WARN_ON(cwq->thread == current);
367 
368 	spin_lock_irq(&cwq->lock);
369 	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
370 		insert_wq_barrier(cwq, &barr, &cwq->worklist);
371 		active = 1;
372 	}
373 	spin_unlock_irq(&cwq->lock);
374 
375 	if (active)
376 		wait_for_completion(&barr.done);
377 
378 	return active;
379 }
380 
381 /**
382  * flush_workqueue - ensure that any scheduled work has run to completion.
383  * @wq: workqueue to flush
384  *
385  * Forces execution of the workqueue and blocks until its completion.
386  * This is typically used in driver shutdown handlers.
387  *
388  * We sleep until all work items that were queued on entry have been handled,
389  * but we are not livelocked by new incoming ones.
390  *
391  * This function used to run the workqueues itself.  Now we just wait for the
392  * helper threads to do it.
393  */
394 void flush_workqueue(struct workqueue_struct *wq)
395 {
396 	const struct cpumask *cpu_map = wq_cpu_map(wq);
397 	int cpu;
398 
399 	might_sleep();
400 	lock_map_acquire(&wq->lockdep_map);
401 	lock_map_release(&wq->lockdep_map);
402 	for_each_cpu(cpu, cpu_map)
403 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
404 }
405 EXPORT_SYMBOL_GPL(flush_workqueue);
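
/*
 * Illustrative sketch (hypothetical names): a driver's shutdown path
 * typically stops queueing new work and then flushes before freeing
 * state shared with the work functions.
 */
#if 0
static void my_shutdown(void)
{
	my_stop_queueing();		/* hypothetical: prevent re-queueing */
	flush_workqueue(my_wq);		/* previously queued work is now done */
	kfree(my_shared_state);
}
#endif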
406 
407 /**
408  * flush_work - block until a work_struct's callback has terminated
409  * @work: the work which is to be flushed
410  *
411  * Returns false if @work has already terminated.
412  *
413  * It is expected that, prior to calling flush_work(), the caller has
414  * arranged for the work not to be requeued; otherwise it doesn't make
415  * sense to use this function.
416  */
417 int flush_work(struct work_struct *work)
418 {
419 	struct cpu_workqueue_struct *cwq;
420 	struct list_head *prev;
421 	struct wq_barrier barr;
422 
423 	might_sleep();
424 	cwq = get_wq_data(work);
425 	if (!cwq)
426 		return 0;
427 
428 	lock_map_acquire(&cwq->wq->lockdep_map);
429 	lock_map_release(&cwq->wq->lockdep_map);
430 
431 	prev = NULL;
432 	spin_lock_irq(&cwq->lock);
433 	if (!list_empty(&work->entry)) {
434 		/*
435 		 * See the comment near try_to_grab_pending()->smp_rmb().
436 		 * If it was re-queued under us, we are not going to wait.
437 		 */
438 		smp_rmb();
439 		if (unlikely(cwq != get_wq_data(work)))
440 			goto out;
441 		prev = &work->entry;
442 	} else {
443 		if (cwq->current_work != work)
444 			goto out;
445 		prev = &cwq->worklist;
446 	}
447 	insert_wq_barrier(cwq, &barr, prev->next);
448 out:
449 	spin_unlock_irq(&cwq->lock);
450 	if (!prev)
451 		return 0;
452 
453 	wait_for_completion(&barr.done);
454 	return 1;
455 }
456 EXPORT_SYMBOL_GPL(flush_work);
457 
458 /*
459  * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING
460  * bit, so this work can't be re-armed in any way.
461  */
462 static int try_to_grab_pending(struct work_struct *work)
463 {
464 	struct cpu_workqueue_struct *cwq;
465 	int ret = -1;
466 
467 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
468 		return 0;
469 
470 	/*
471 	 * The queueing is in progress, or it is already queued. Try to
472 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
473 	 */
474 
475 	cwq = get_wq_data(work);
476 	if (!cwq)
477 		return ret;
478 
479 	spin_lock_irq(&cwq->lock);
480 	if (!list_empty(&work->entry)) {
481 		/*
482 		 * This work is queued, but perhaps we locked the wrong cwq.
483 		 * In that case we must see the new value after smp_rmb(), see
484 		 * insert_work()->smp_wmb().
485 		 */
486 		smp_rmb();
487 		if (cwq == get_wq_data(work)) {
488 			list_del_init(&work->entry);
489 			ret = 1;
490 		}
491 	}
492 	spin_unlock_irq(&cwq->lock);
493 
494 	return ret;
495 }
496 
497 static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
498 				struct work_struct *work)
499 {
500 	struct wq_barrier barr;
501 	int running = 0;
502 
503 	spin_lock_irq(&cwq->lock);
504 	if (unlikely(cwq->current_work == work)) {
505 		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
506 		running = 1;
507 	}
508 	spin_unlock_irq(&cwq->lock);
509 
510 	if (unlikely(running))
511 		wait_for_completion(&barr.done);
512 }
513 
514 static void wait_on_work(struct work_struct *work)
515 {
516 	struct cpu_workqueue_struct *cwq;
517 	struct workqueue_struct *wq;
518 	const struct cpumask *cpu_map;
519 	int cpu;
520 
521 	might_sleep();
522 
523 	lock_map_acquire(&work->lockdep_map);
524 	lock_map_release(&work->lockdep_map);
525 
526 	cwq = get_wq_data(work);
527 	if (!cwq)
528 		return;
529 
530 	wq = cwq->wq;
531 	cpu_map = wq_cpu_map(wq);
532 
533 	for_each_cpu(cpu, cpu_map)
534 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
535 }
536 
537 static int __cancel_work_timer(struct work_struct *work,
538 				struct timer_list* timer)
539 {
540 	int ret;
541 
542 	do {
543 		ret = (timer && likely(del_timer(timer)));
544 		if (!ret)
545 			ret = try_to_grab_pending(work);
546 		wait_on_work(work);
547 	} while (unlikely(ret < 0));
548 
549 	work_clear_pending(work);
550 	return ret;
551 }
552 
553 /**
554  * cancel_work_sync - block until a work_struct's callback has terminated
555  * @work: the work which is to be flushed
556  *
557  * Returns true if @work was pending.
558  *
559  * cancel_work_sync() will cancel the work if it is queued. If the work's
560  * callback appears to be running, cancel_work_sync() will block until it
561  * has completed.
562  *
563  * It is possible to use this function if the work re-queues itself. It can
564  * cancel the work even if it migrates to another workqueue; however, in that
565  * case it only guarantees that work->func() has completed on the last queued
566  * workqueue.
567  *
568  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
569  * pending, otherwise it goes into a busy-wait loop until the timer expires.
570  *
571  * The caller must ensure that the workqueue_struct on which this work was
572  * last queued can't be destroyed before this function returns.
573  */
574 int cancel_work_sync(struct work_struct *work)
575 {
576 	return __cancel_work_timer(work, NULL);
577 }
578 EXPORT_SYMBOL_GPL(cancel_work_sync);
579 
580 /**
581  * cancel_delayed_work_sync - reliably kill off a delayed work.
582  * @dwork: the delayed work struct
583  *
584  * Returns true if @dwork was pending.
585  *
586  * It is possible to use this function if @dwork rearms itself via queue_work()
587  * or queue_delayed_work(). See also the comment for cancel_work_sync().
588  */
589 int cancel_delayed_work_sync(struct delayed_work *dwork)
590 {
591 	return __cancel_work_timer(&dwork->work, &dwork->timer);
592 }
593 EXPORT_SYMBOL(cancel_delayed_work_sync);
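
/*
 * Illustrative teardown sketch (struct my_dev and its poll_work member
 * are hypothetical): cancel the pending timer/work and wait for a
 * running callback before freeing the data it uses.
 */
#if 0
static void my_teardown(struct my_dev *dev)
{
	cancel_delayed_work_sync(&dev->poll_work);
	kfree(dev);
}
#endif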
594 
595 static struct workqueue_struct *keventd_wq __read_mostly;
596 
597 /**
598  * schedule_work - put work task in global workqueue
599  * @work: job to be done
600  *
601  * Returns zero if @work was already on the kernel-global workqueue and
602  * non-zero otherwise.
603  *
604  * This puts a job in the kernel-global workqueue if it was not already
605  * queued and leaves it in the same position on the kernel-global
606  * workqueue otherwise.
607  */
608 int schedule_work(struct work_struct *work)
609 {
610 	return queue_work(keventd_wq, work);
611 }
612 EXPORT_SYMBOL(schedule_work);
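
/*
 * Illustrative sketch (hypothetical names): an interrupt handler defers
 * sleeping operations to process context via the kernel-global queue.
 */
#if 0
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	schedule_work(&dev->deferred_work);	/* runs later in events/N */
	return IRQ_HANDLED;
}
#endif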
613 
614 /**
615  * schedule_work_on - put work task on a specific cpu
616  * @cpu: cpu to put the work task on
617  * @work: job to be done
618  *
619  * This puts a job on a specific CPU.
620  */
621 int schedule_work_on(int cpu, struct work_struct *work)
622 {
623 	return queue_work_on(cpu, keventd_wq, work);
624 }
625 EXPORT_SYMBOL(schedule_work_on);
626 
627 /**
628  * schedule_delayed_work - put work task in global workqueue after delay
629  * @dwork: job to be done
630  * @delay: number of jiffies to wait or 0 for immediate execution
631  *
632  * After waiting for a given time this puts a job in the kernel-global
633  * workqueue.
634  */
635 int schedule_delayed_work(struct delayed_work *dwork,
636 					unsigned long delay)
637 {
638 	return queue_delayed_work(keventd_wq, dwork, delay);
639 }
640 EXPORT_SYMBOL(schedule_delayed_work);
641 
642 /**
643  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
644  * @cpu: cpu to use
645  * @dwork: job to be done
646  * @delay: number of jiffies to wait
647  *
648  * After waiting for a given time this puts a job in the kernel-global
649  * workqueue on the specified CPU.
650  */
651 int schedule_delayed_work_on(int cpu,
652 			struct delayed_work *dwork, unsigned long delay)
653 {
654 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
655 }
656 EXPORT_SYMBOL(schedule_delayed_work_on);
657 
658 /**
659  * schedule_on_each_cpu - call a function on each online CPU from keventd
660  * @func: the function to call
661  *
662  * Returns zero on success.
663  * Returns a negative errno on failure.
664  *
665  * schedule_on_each_cpu() is very slow.
666  */
667 int schedule_on_each_cpu(work_func_t func)
668 {
669 	int cpu;
670 	struct work_struct *works;
671 
672 	works = alloc_percpu(struct work_struct);
673 	if (!works)
674 		return -ENOMEM;
675 
676 	get_online_cpus();
677 	for_each_online_cpu(cpu) {
678 		struct work_struct *work = per_cpu_ptr(works, cpu);
679 
680 		INIT_WORK(work, func);
681 		schedule_work_on(cpu, work);
682 	}
683 	for_each_online_cpu(cpu)
684 		flush_work(per_cpu_ptr(works, cpu));
685 	put_online_cpus();
686 	free_percpu(works);
687 	return 0;
688 }
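
/*
 * Illustrative sketch (hypothetical names): run a function once on every
 * online CPU and wait for all of them to finish.
 */
#if 0
static void my_drain_fn(struct work_struct *unused)
{
	/* runs once on each online CPU, in that CPU's events/N thread */
}

static int my_drain_all_cpus(void)
{
	return schedule_on_each_cpu(my_drain_fn);	/* blocks until done */
}
#endif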
689 
690 void flush_scheduled_work(void)
691 {
692 	flush_workqueue(keventd_wq);
693 }
694 EXPORT_SYMBOL(flush_scheduled_work);
695 
696 /**
697  * execute_in_process_context - reliably execute the routine with user context
698  * @fn:		the function to execute
699  * @ew:		guaranteed storage for the execute work structure (must
700  *		be available when the work executes)
701  *
702  * Executes the function immediately if process context is available,
703  * otherwise schedules the function for delayed execution.
704  *
705  * Returns:	0 - function was executed
706  *		1 - function was scheduled for execution
707  */
708 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
709 {
710 	if (!in_interrupt()) {
711 		fn(&ew->work);
712 		return 0;
713 	}
714 
715 	INIT_WORK(&ew->work, fn);
716 	schedule_work(&ew->work);
717 
718 	return 1;
719 }
720 EXPORT_SYMBOL_GPL(execute_in_process_context);
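
/*
 * Illustrative sketch (struct my_obj and its members are hypothetical):
 * the execute_work storage must outlive a deferred call, so it usually
 * lives inside the object being released.
 */
#if 0
struct my_obj {
	struct execute_work release_work;
	/* ... */
};

static void my_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj,
					  release_work.work);
	kfree(obj);
}

static void my_obj_free(struct my_obj *obj)
{
	/* frees immediately in process context, defers otherwise */
	execute_in_process_context(my_release, &obj->release_work);
}
#endif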
721 
722 int keventd_up(void)
723 {
724 	return keventd_wq != NULL;
725 }
726 
727 int current_is_keventd(void)
728 {
729 	struct cpu_workqueue_struct *cwq;
730 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
731 	int ret = 0;
732 
733 	BUG_ON(!keventd_wq);
734 
735 	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
736 	if (current == cwq->thread)
737 		ret = 1;
738 
739 	return ret;
740 
741 }
742 
743 static struct cpu_workqueue_struct *
744 init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
745 {
746 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
747 
748 	cwq->wq = wq;
749 	spin_lock_init(&cwq->lock);
750 	INIT_LIST_HEAD(&cwq->worklist);
751 	init_waitqueue_head(&cwq->more_work);
752 
753 	return cwq;
754 }
755 
756 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
757 {
758 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
759 	struct workqueue_struct *wq = cwq->wq;
760 	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
761 	struct task_struct *p;
762 
763 	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
764 	/*
765 	 * Nobody can add the work_struct to this cwq,
766 	 *	if (caller is __create_workqueue)
767 	 *		nobody should see this wq
768 	 *	else // caller is CPU_UP_PREPARE
769 	 *		cpu is not on cpu_online_map
770 	 * so we can abort safely.
771 	 */
772 	if (IS_ERR(p))
773 		return PTR_ERR(p);
774 	if (cwq->wq->rt)
775 		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
776 	cwq->thread = p;
777 
778 	trace_workqueue_creation(cwq->thread, cpu);
779 
780 	return 0;
781 }
782 
783 static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
784 {
785 	struct task_struct *p = cwq->thread;
786 
787 	if (p != NULL) {
788 		if (cpu >= 0)
789 			kthread_bind(p, cpu);
790 		wake_up_process(p);
791 	}
792 }
793 
794 struct workqueue_struct *__create_workqueue_key(const char *name,
795 						int singlethread,
796 						int freezeable,
797 						int rt,
798 						struct lock_class_key *key,
799 						const char *lock_name)
800 {
801 	struct workqueue_struct *wq;
802 	struct cpu_workqueue_struct *cwq;
803 	int err = 0, cpu;
804 
805 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
806 	if (!wq)
807 		return NULL;
808 
809 	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
810 	if (!wq->cpu_wq) {
811 		kfree(wq);
812 		return NULL;
813 	}
814 
815 	wq->name = name;
816 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
817 	wq->singlethread = singlethread;
818 	wq->freezeable = freezeable;
819 	wq->rt = rt;
820 	INIT_LIST_HEAD(&wq->list);
821 
822 	if (singlethread) {
823 		cwq = init_cpu_workqueue(wq, singlethread_cpu);
824 		err = create_workqueue_thread(cwq, singlethread_cpu);
825 		start_workqueue_thread(cwq, -1);
826 	} else {
827 		cpu_maps_update_begin();
828 		/*
829 		 * We must place this wq on the list even if the code below fails.
830 		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
831 		 * destroy_workqueue() takes the lock; in that case we would leak
832 		 * cwq[cpu]->thread.
833 		 */
834 		spin_lock(&workqueue_lock);
835 		list_add(&wq->list, &workqueues);
836 		spin_unlock(&workqueue_lock);
837 		/*
838 		 * We must initialize cwqs for each possible cpu even if we
839 		 * are going to call destroy_workqueue() in the end. Otherwise
840 		 * cpu_up() can hit the uninitialized cwq once we drop the
841 		 * lock.
842 		 */
843 		for_each_possible_cpu(cpu) {
844 			cwq = init_cpu_workqueue(wq, cpu);
845 			if (err || !cpu_online(cpu))
846 				continue;
847 			err = create_workqueue_thread(cwq, cpu);
848 			start_workqueue_thread(cwq, cpu);
849 		}
850 		cpu_maps_update_done();
851 	}
852 
853 	if (err) {
854 		destroy_workqueue(wq);
855 		wq = NULL;
856 	}
857 	return wq;
858 }
859 EXPORT_SYMBOL_GPL(__create_workqueue_key);
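
/*
 * Callers normally reach __create_workqueue_key() through the
 * create_workqueue()/create_singlethread_workqueue() family of macros
 * in <linux/workqueue.h>, which fill in the singlethread, freezeable
 * and rt arguments and supply a static lockdep key.
 */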
860 
861 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
862 {
863 	/*
864 	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
865 	 * cpu_add_remove_lock protects cwq->thread.
866 	 */
867 	if (cwq->thread == NULL)
868 		return;
869 
870 	lock_map_acquire(&cwq->wq->lockdep_map);
871 	lock_map_release(&cwq->wq->lockdep_map);
872 
873 	flush_cpu_workqueue(cwq);
874 	/*
875 	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
876 	 * a concurrent flush_workqueue() can insert a barrier after us.
877 	 * However, in that case run_workqueue() won't return, and the thread
878 	 * won't check kthread_should_stop(), until it flushes all work_structs.
879 	 * When ->worklist becomes empty it is safe to exit because no
880 	 * more work_structs can be queued on this cwq: flush_workqueue
881 	 * checks list_empty(), and a "normal" queue_work() can't use
882 	 * a dead CPU.
883 	 */
884 	trace_workqueue_destruction(cwq->thread);
885 	kthread_stop(cwq->thread);
886 	cwq->thread = NULL;
887 }
888 
889 /**
890  * destroy_workqueue - safely terminate a workqueue
891  * @wq: target workqueue
892  *
893  * Safely destroy a workqueue. All work currently pending will be done first.
894  */
895 void destroy_workqueue(struct workqueue_struct *wq)
896 {
897 	const struct cpumask *cpu_map = wq_cpu_map(wq);
898 	int cpu;
899 
900 	cpu_maps_update_begin();
901 	spin_lock(&workqueue_lock);
902 	list_del(&wq->list);
903 	spin_unlock(&workqueue_lock);
904 
905 	for_each_cpu(cpu, cpu_map)
906 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
907 	cpu_maps_update_done();
908 
909 	free_percpu(wq->cpu_wq);
910 	kfree(wq);
911 }
912 EXPORT_SYMBOL_GPL(destroy_workqueue);
913 
914 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
915 						unsigned long action,
916 						void *hcpu)
917 {
918 	unsigned int cpu = (unsigned long)hcpu;
919 	struct cpu_workqueue_struct *cwq;
920 	struct workqueue_struct *wq;
921 	int ret = NOTIFY_OK;
922 
923 	action &= ~CPU_TASKS_FROZEN;
924 
925 	switch (action) {
926 	case CPU_UP_PREPARE:
927 		cpumask_set_cpu(cpu, cpu_populated_map);
928 	}
929 undo:
930 	list_for_each_entry(wq, &workqueues, list) {
931 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
932 
933 		switch (action) {
934 		case CPU_UP_PREPARE:
935 			if (!create_workqueue_thread(cwq, cpu))
936 				break;
937 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
938 				wq->name, cpu);
939 			action = CPU_UP_CANCELED;
940 			ret = NOTIFY_BAD;
941 			goto undo;
942 
943 		case CPU_ONLINE:
944 			start_workqueue_thread(cwq, cpu);
945 			break;
946 
947 		case CPU_UP_CANCELED:
948 			start_workqueue_thread(cwq, -1);
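			/* fall through: stop and clean up the just-created thread */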
949 		case CPU_POST_DEAD:
950 			cleanup_workqueue_thread(cwq);
951 			break;
952 		}
953 	}
954 
955 	switch (action) {
956 	case CPU_UP_CANCELED:
957 	case CPU_POST_DEAD:
958 		cpumask_clear_cpu(cpu, cpu_populated_map);
959 	}
960 
961 	return ret;
962 }
963 
964 #ifdef CONFIG_SMP
965 
966 struct work_for_cpu {
967 	struct completion completion;
968 	long (*fn)(void *);
969 	void *arg;
970 	long ret;
971 };
972 
973 static int do_work_for_cpu(void *_wfc)
974 {
975 	struct work_for_cpu *wfc = _wfc;
976 	wfc->ret = wfc->fn(wfc->arg);
977 	complete(&wfc->completion);
978 	return 0;
979 }
980 
981 /**
982  * work_on_cpu - run a function in user context on a particular cpu
983  * @cpu: the cpu to run on
984  * @fn: the function to run
985  * @arg: the function arg
986  *
987  * This will return the value @fn returns.
988  * It is up to the caller to ensure that the cpu doesn't go offline.
989  * The caller must not hold any locks which would prevent @fn from completing.
990  */
991 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
992 {
993 	struct task_struct *sub_thread;
994 	struct work_for_cpu wfc = {
995 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
996 		.fn = fn,
997 		.arg = arg,
998 	};
999 
1000 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
1001 	if (IS_ERR(sub_thread))
1002 		return PTR_ERR(sub_thread);
1003 	kthread_bind(sub_thread, cpu);
1004 	wake_up_process(sub_thread);
1005 	wait_for_completion(&wfc.completion);
1006 	return wfc.ret;
1007 }
1008 EXPORT_SYMBOL_GPL(work_on_cpu);
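
/*
 * Illustrative sketch (hypothetical names): run a query bound to one CPU
 * and return its result to the caller.
 */
#if 0
static long my_read_on_cpu(void *arg)
{
	return my_read_local_value();	/* hypothetical per-cpu read */
}

static long my_query_cpu(unsigned int cpu)
{
	/* blocks; the caller must keep @cpu online across the call */
	return work_on_cpu(cpu, my_read_on_cpu, NULL);
}
#endif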
1009 #endif /* CONFIG_SMP */
1010 
1011 void __init init_workqueues(void)
1012 {
1013 	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
1014 
1015 	cpumask_copy(cpu_populated_map, cpu_online_mask);
1016 	singlethread_cpu = cpumask_first(cpu_possible_mask);
1017 	cpu_singlethread_map = cpumask_of(singlethread_cpu);
1018 	hotcpu_notifier(workqueue_cpu_callback, 0);
1019 	keventd_wq = create_workqueue("events");
1020 	BUG_ON(!keventd_wq);
1021 }
1022