xref: /linux/kernel/kthread.c (revision 9a379e77033f02c4a071891afdf0f0a01eff8ccb)
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

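/*
 * Example (illustrative sketch, not part of this file): a minimal thread
 * function cooperating with kthread_stop().  The names example_thread_fn
 * and example_do_work() are hypothetical.
 *
 *	static int example_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	// passed through to kthread_stop()
 *	}
 */
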
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

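/*
 * Example (illustrative sketch): the main loop of a freezable kthread.
 * example_do_work() is hypothetical; set_freezable() is required because
 * kthreads are not freezable by default.
 *
 *	static int example_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				pr_debug("thawed, revalidating state\n");
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
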
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

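/*
 * Example (illustrative sketch): a thread function honouring both the stop
 * and park protocols, as smpboot-style per-CPU threads do.  The example_*
 * names are hypothetical.
 *
 *	static int example_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				// release per-CPU resources first
 *				kthread_parkme();
 *				continue;
 *			}
 *			example_do_work(data);
 *		}
 *		return 0;
 *	}
 */
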
static int kthread(void *_create)
{
	/* Copy data: it's on the kthread's stack. */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If the user was SIGKILLed, release the structure ourselves. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell the user we've spawned; wait for stop or wakeup. */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/*
 * Called from do_fork() to get node information for the task that is
 * about to be created.
 */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If the user was SIGKILLed, release the structure ourselves. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for the completion in a killable state, since we might be
	 * chosen by the OOM killer while kthreadd is trying to allocate
	 * memory for the new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If we were SIGKILLed before kthreadd (or the new kernel
		 * thread) called complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or the new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, pass its node
 * in @node to get NUMA affinity for the kthread stack; otherwise pass
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when kthread_should_stop() is true (which means kthread_stop()
 * has been called).  The return value should be zero or a negative error
 * number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

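/*
 * Example (illustrative sketch): creating and starting a kthread.  The
 * kthread_run() macro from <linux/kthread.h> wraps exactly this pattern.
 * example_thread_fn, ctx and id are hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(example_thread_fn, ctx, NUMA_NO_NODE,
 *				     "example/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);	// starts executing example_thread_fn()
 */
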
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - create a CPU-bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: the CPU on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind it again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

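/*
 * Example (illustrative sketch): creating a per-CPU thread and holding it
 * parked until its CPU comes online, the smpboot-style pattern.  Note the
 * "%u" name requirement; example_percpu_fn is hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_cpu(example_percpu_fn, NULL, cpu,
 *				    "example/%u");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	kthread_park(tsk);	// hold it until the CPU comes online
 *	...
 *	kthread_unpark(tsk);	// rebinds to @cpu, runs example_percpu_fn()
 */
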
/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If
 * the thread is marked percpu, it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here because we don't wait until the
	 * task has left the park code. If we parked again before that
	 * happened, we would see a stale IS_PARKED bit that is about to
	 * be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, only the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		if (k != current) {
			wake_up_process(k);
			wait_for_completion(&kthread->parked);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

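/*
 * Example (illustrative sketch): quiescing a running kthread across a
 * critical section, then letting it resume.  The thread function must
 * check kthread_should_park() and call kthread_parkme() as shown in the
 * earlier park example; example_reconfigure() is hypothetical.
 *
 *	if (kthread_park(tsk))		// thread now waits in TASK_PARKED
 *		return -ENOSYS;
 *	example_reconfigure();		// safe while the thread is parked
 *	kthread_unpark(tsk);		// threadfn continues after parkme
 */
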
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure the
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

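/*
 * Example (illustrative sketch): the shutdown half of the pattern shown
 * for kthread_should_stop() above.  The thread function's return value is
 * handed back to us here.
 *
 *	int err;
 *
 *	err = kthread_stop(tsk);	// wakes tsk, waits for it to exit
 *	if (err && err != -EINTR)
 *		pr_warn("example thread exited with %d\n", err);
 */
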
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Set up a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main loop of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after
 * one work item finishes and before the next one is started.
 *
 * Also, a work item must not be handled by more than one worker at the
 * same time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

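/*
 * Example (illustrative sketch): creating a worker and queueing a work
 * item on it.  example_work_fn and the surrounding error handling are
 * hypothetical.
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker's kthread context
 *	}
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work work;
 *
 *	worker = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&work, example_work_fn);
 *	kthread_queue_work(worker, &work);
 */
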
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * It is good practice to also include the CPU number in the worker name,
 * for example kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work cannot be queued at the moment, i.e. when
 * it is already pending on a worker list or is being canceled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * created with kthread_create_worker() or initialized with
 * kthread_init_worker().  Returns %true if @work was successfully
 * queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The signature of this function is dictated by struct timer_list. It is
 * called from an irqsafe timer, with interrupts already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on there being no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, this starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if @work was already pending, meaning that either its
 * timer was running or the work was already queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

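/*
 * Example (illustrative sketch): arming a delayed work item on a worker
 * created as in the earlier worker example.  example_timeout_fn is
 * hypothetical and has the usual kthread_work_func_t signature.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, example_timeout_fn);
 *	// run example_timeout_fn() on @worker after roughly 2 seconds
 *	kthread_queue_delayed_work(worker, &dwork, 2 * HZ);
 */
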
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. It also makes sure
 * that the work won't be queued later via the delayed work's timer.
 *
 * The work might still be running when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

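/*
 * Example (illustrative sketch): restarting a watchdog-style timeout each
 * time activity is seen, using the @dwork from the previous example.
 *
 *	// push the deadline out to ~5 seconds from now, whether or not
 *	// the work was already pending
 *	kthread_mod_delayed_work(worker, &dwork, 5 * HZ);
 */
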
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works; use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

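/*
 * Example (illustrative sketch): tearing down the delayed work and the
 * worker from the previous examples in a safe order.
 *
 *	kthread_cancel_delayed_work_sync(&dwork);	// idle afterwards
 *	kthread_destroy_worker(worker);			// flushes, then stops
 */
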
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread that runs jobs on behalf of other
 * threads. In some cases, we expect the jobs to be attributed to the
 * cgroup of the originating thread instead of that of the current thread.
 * This function stores the original thread's cgroup info in the current
 * kthread's context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

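/*
 * Example (illustrative sketch): a kthread doing I/O on behalf of a user
 * task temporarily adopts that task's blkcg, then drops it.  @css is
 * assumed to have been looked up elsewhere (e.g. from the originating
 * request); example_submit_io() is hypothetical.
 *
 *	kthread_associate_blkcg(css);	// takes its own reference on @css
 *	example_submit_io();		// I/O is charged to @css
 *	kthread_associate_blkcg(NULL);	// drop the association
 */
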
/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif