xref: /linux/kernel/kthread.c (revision 1cbfb828e05171ca2dd77b5988d068e6872480fe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4  *   Copyright (C) 2009 Red Hat, Inc.
5  *
6  * Creation is done via kthreadd, so that we get a clean environment
7  * even if we're invoked from userspace (think modprobe, hotplug cpu,
8  * etc.).
9  */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32 
33 
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37 
38 struct kthread_create_info
39 {
40 	/* Information passed to kthread() from kthreadd. */
41 	char *full_name;
42 	int (*threadfn)(void *data);
43 	void *data;
44 	int node;
45 
46 	/* Result passed back to kthread_create() from kthreadd. */
47 	struct task_struct *result;
48 	struct completion *done;
49 
50 	struct list_head list;
51 };
52 
53 struct kthread {
54 	unsigned long flags;
55 	unsigned int cpu;
56 	int result;
57 	int (*threadfn)(void *);
58 	void *data;
59 	struct completion parked;
60 	struct completion exited;
61 #ifdef CONFIG_BLK_CGROUP
62 	struct cgroup_subsys_state *blkcg_css;
63 #endif
64 	/* To store the full name if task comm is truncated. */
65 	char *full_name;
66 };
67 
68 enum KTHREAD_BITS {
69 	KTHREAD_IS_PER_CPU = 0,
70 	KTHREAD_SHOULD_STOP,
71 	KTHREAD_SHOULD_PARK,
72 };
73 
74 static inline struct kthread *to_kthread(struct task_struct *k)
75 {
76 	WARN_ON(!(k->flags & PF_KTHREAD));
77 	return k->worker_private;
78 }
79 
80 /*
81  * Variant of to_kthread() that doesn't assume @p is a kthread.
82  *
83  * Per construction; when:
84  *
85  *   (p->flags & PF_KTHREAD) && p->worker_private
86  *
87  * the task is both a kthread and struct kthread is persistent. However
88  * PF_KTHREAD on its own is not, kernel_thread() can exec() (see umh.c and
89  * begin_new_exec()).
90  */
91 static inline struct kthread *__to_kthread(struct task_struct *p)
92 {
93 	void *kthread = p->worker_private;
94 	if (kthread && !(p->flags & PF_KTHREAD))
95 		kthread = NULL;
96 	return kthread;
97 }
98 
99 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
100 {
101 	struct kthread *kthread = to_kthread(tsk);
102 
103 	if (!kthread || !kthread->full_name) {
104 		strscpy(buf, tsk->comm, buf_size);
105 		return;
106 	}
107 
108 	strscpy_pad(buf, kthread->full_name, buf_size);
109 }
110 
111 bool set_kthread_struct(struct task_struct *p)
112 {
113 	struct kthread *kthread;
114 
115 	if (WARN_ON_ONCE(to_kthread(p)))
116 		return false;
117 
118 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
119 	if (!kthread)
120 		return false;
121 
122 	init_completion(&kthread->exited);
123 	init_completion(&kthread->parked);
124 	p->vfork_done = &kthread->exited;
125 
126 	p->worker_private = kthread;
127 	return true;
128 }
129 
130 void free_kthread_struct(struct task_struct *k)
131 {
132 	struct kthread *kthread;
133 
134 	/*
135 	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
136 	 */
137 	kthread = to_kthread(k);
138 	if (!kthread)
139 		return;
140 
141 #ifdef CONFIG_BLK_CGROUP
142 	WARN_ON_ONCE(kthread->blkcg_css);
143 #endif
144 	k->worker_private = NULL;
145 	kfree(kthread->full_name);
146 	kfree(kthread);
147 }
148 
149 /**
150  * kthread_should_stop - should this kthread return now?
151  *
152  * When someone calls kthread_stop() on your kthread, it will be woken
153  * and this will return true.  You should then return, and your return
154  * value will be passed through to kthread_stop().
155  */
156 bool kthread_should_stop(void)
157 {
158 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
159 }
160 EXPORT_SYMBOL(kthread_should_stop);
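
/*
 * A minimal usage sketch: the typical shape of a thread function that honours
 * kthread_should_stop().  All example_* identifiers below are hypothetical.
 */
static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then sleep for up to a second. */
		schedule_timeout_interruptible(HZ);
	}
	/* This value is handed back to the caller of kthread_stop(). */
	return 0;
}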
161 
162 static bool __kthread_should_park(struct task_struct *k)
163 {
164 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
165 }
166 
167 /**
168  * kthread_should_park - should this kthread park now?
169  *
170  * When someone calls kthread_park() on your kthread, it will be woken
171  * and this will return true.  You should then do the necessary
172  * cleanup and call kthread_parkme()
173  *
174  * Similar to kthread_should_stop(), but this keeps the thread alive
175  * and in a park position. kthread_unpark() "restarts" the thread and
176  * calls the thread function again.
177  */
178 bool kthread_should_park(void)
179 {
180 	return __kthread_should_park(current);
181 }
182 EXPORT_SYMBOL_GPL(kthread_should_park);
183 
184 bool kthread_should_stop_or_park(void)
185 {
186 	struct kthread *kthread = __to_kthread(current);
187 
188 	if (!kthread)
189 		return false;
190 
191 	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
192 }
193 
194 /**
195  * kthread_freezable_should_stop - should this freezable kthread return now?
196  * @was_frozen: optional out parameter, indicates whether %current was frozen
197  *
198  * kthread_should_stop() for freezable kthreads, which will enter
199  * refrigerator if necessary.  This function is safe from kthread_stop() /
200  * freezer deadlock and freezable kthreads should use this function instead
201  * of calling try_to_freeze() directly.
202  */
203 bool kthread_freezable_should_stop(bool *was_frozen)
204 {
205 	bool frozen = false;
206 
207 	might_sleep();
208 
209 	if (unlikely(freezing(current)))
210 		frozen = __refrigerator(true);
211 
212 	if (was_frozen)
213 		*was_frozen = frozen;
214 
215 	return kthread_should_stop();
216 }
217 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
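
/*
 * A minimal usage sketch: a freezable kthread marks itself with
 * set_freezable() and uses kthread_freezable_should_stop() as its loop
 * condition so that it enters the refrigerator at safe points.  The
 * example_* identifiers are hypothetical.
 */
static int example_freezable_threadfn(void *data)
{
	bool was_frozen;

	set_freezable();

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			continue;	/* re-check state after thawing */
		/* do one unit of freezer-safe work */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}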
218 
219 /**
220  * kthread_func - return the function specified on kthread creation
221  * @task: kthread task in question
222  *
223  * Returns NULL if the task is not a kthread.
224  */
225 void *kthread_func(struct task_struct *task)
226 {
227 	struct kthread *kthread = __to_kthread(task);
228 	if (kthread)
229 		return kthread->threadfn;
230 	return NULL;
231 }
232 EXPORT_SYMBOL_GPL(kthread_func);
233 
234 /**
235  * kthread_data - return data value specified on kthread creation
236  * @task: kthread task in question
237  *
238  * Return the data value specified when kthread @task was created.
239  * The caller is responsible for ensuring the validity of @task when
240  * calling this function.
241  */
242 void *kthread_data(struct task_struct *task)
243 {
244 	return to_kthread(task)->data;
245 }
246 EXPORT_SYMBOL_GPL(kthread_data);
247 
248 /**
249  * kthread_probe_data - speculative version of kthread_data()
250  * @task: possible kthread task in question
251  *
252  * @task could be a kthread task.  Return the data value specified when it
253  * was created if accessible.  If @task isn't a kthread task or its data is
254  * inaccessible for any reason, %NULL is returned.  This function requires
255  * that @task itself is safe to dereference.
256  */
257 void *kthread_probe_data(struct task_struct *task)
258 {
259 	struct kthread *kthread = __to_kthread(task);
260 	void *data = NULL;
261 
262 	if (kthread)
263 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
264 	return data;
265 }
266 
267 static void __kthread_parkme(struct kthread *self)
268 {
269 	for (;;) {
270 		/*
271 		 * TASK_PARKED is a special state; we must serialize against
272 		 * possible pending wakeups to avoid store-store collisions on
273 		 * task->state.
274 		 *
275 		 * Such a collision might possibly result in the task state
276 		 * changing from TASK_PARKED and us failing the
277 		 * wait_task_inactive() in kthread_park().
278 		 */
279 		set_special_state(TASK_PARKED);
280 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
281 			break;
282 
283 		/*
284 		 * Thread is going to call schedule(), do not preempt it,
285 		 * or the caller of kthread_park() may spend more time in
286 		 * wait_task_inactive().
287 		 */
288 		preempt_disable();
289 		complete(&self->parked);
290 		schedule_preempt_disabled();
291 		preempt_enable();
292 	}
293 	__set_current_state(TASK_RUNNING);
294 }
295 
296 void kthread_parkme(void)
297 {
298 	__kthread_parkme(to_kthread(current));
299 }
300 EXPORT_SYMBOL_GPL(kthread_parkme);
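
/*
 * A minimal usage sketch: a thread function that supports both stopping and
 * parking.  When kthread_should_park() becomes true it calls kthread_parkme()
 * and blocks in TASK_PARKED until kthread_unpark().  The example_* identifiers
 * are hypothetical.
 */
static int example_parkable_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			kthread_parkme();
			continue;	/* re-evaluate stop/park after unparking */
		}
		/* do one unit of work */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}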
301 
302 /**
303  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
304  * @result: The integer value to return to kthread_stop().
305  *
306  * While kthread_exit() can be called directly, it exists so that
307  * functions which do some additional work in non-modular code, such as
308  * module_put_and_kthread_exit(), can be implemented.
309  *
310  * Does not return.
311  */
312 void __noreturn kthread_exit(long result)
313 {
314 	struct kthread *kthread = to_kthread(current);
315 	kthread->result = result;
316 	do_exit(0);
317 }
318 EXPORT_SYMBOL(kthread_exit);
319 
320 /**
321  * kthread_complete_and_exit - Exit the current kthread.
322  * @comp: Completion to complete
323  * @code: The integer value to return to kthread_stop().
324  *
325  * If present, complete @comp and then return @code to kthread_stop().
326  *
327  * A kernel thread whose module may be removed after the completion of
328  * @comp can use this function to exit safely.
329  *
330  * Does not return.
331  */
332 void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
333 {
334 	if (comp)
335 		complete(comp);
336 
337 	kthread_exit(code);
338 }
339 EXPORT_SYMBOL(kthread_complete_and_exit);
340 
341 static int kthread(void *_create)
342 {
343 	static const struct sched_param param = { .sched_priority = 0 };
344 	/* Copy data: it's on kthread's stack */
345 	struct kthread_create_info *create = _create;
346 	int (*threadfn)(void *data) = create->threadfn;
347 	void *data = create->data;
348 	struct completion *done;
349 	struct kthread *self;
350 	int ret;
351 
352 	self = to_kthread(current);
353 
354 	/* Release the structure when caller killed by a fatal signal. */
355 	done = xchg(&create->done, NULL);
356 	if (!done) {
357 		kfree(create->full_name);
358 		kfree(create);
359 		kthread_exit(-EINTR);
360 	}
361 
362 	self->full_name = create->full_name;
363 	self->threadfn = threadfn;
364 	self->data = data;
365 
366 	/*
367 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
368 	 * back to default in case they have been changed.
369 	 */
370 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
371 	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
372 
373 	/* OK, tell user we're spawned, wait for stop or wakeup */
374 	__set_current_state(TASK_UNINTERRUPTIBLE);
375 	create->result = current;
376 	/*
377 	 * Thread is going to call schedule(), do not preempt it,
378 	 * or the creator may spend more time in wait_task_inactive().
379 	 */
380 	preempt_disable();
381 	complete(done);
382 	schedule_preempt_disabled();
383 	preempt_enable();
384 
385 	ret = -EINTR;
386 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
387 		cgroup_kthread_ready();
388 		__kthread_parkme(self);
389 		ret = threadfn(data);
390 	}
391 	kthread_exit(ret);
392 }
393 
394 /* Called from kernel_clone() to get node information for the task about to be created. */
395 int tsk_fork_get_node(struct task_struct *tsk)
396 {
397 #ifdef CONFIG_NUMA
398 	if (tsk == kthreadd_task)
399 		return tsk->pref_node_fork;
400 #endif
401 	return NUMA_NO_NODE;
402 }
403 
404 static void create_kthread(struct kthread_create_info *create)
405 {
406 	int pid;
407 
408 #ifdef CONFIG_NUMA
409 	current->pref_node_fork = create->node;
410 #endif
411 	/* We want our own signal handler (we take no signals by default). */
412 	pid = kernel_thread(kthread, create, create->full_name,
413 			    CLONE_FS | CLONE_FILES | SIGCHLD);
414 	if (pid < 0) {
415 		/* Release the structure when caller killed by a fatal signal. */
416 		struct completion *done = xchg(&create->done, NULL);
417 
418 		kfree(create->full_name);
419 		if (!done) {
420 			kfree(create);
421 			return;
422 		}
423 		create->result = ERR_PTR(pid);
424 		complete(done);
425 	}
426 }
427 
428 static __printf(4, 0)
429 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
430 						    void *data, int node,
431 						    const char namefmt[],
432 						    va_list args)
433 {
434 	DECLARE_COMPLETION_ONSTACK(done);
435 	struct task_struct *task;
436 	struct kthread_create_info *create = kmalloc(sizeof(*create),
437 						     GFP_KERNEL);
438 
439 	if (!create)
440 		return ERR_PTR(-ENOMEM);
441 	create->threadfn = threadfn;
442 	create->data = data;
443 	create->node = node;
444 	create->done = &done;
445 	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
446 	if (!create->full_name) {
447 		task = ERR_PTR(-ENOMEM);
448 		goto free_create;
449 	}
450 
451 	spin_lock(&kthread_create_lock);
452 	list_add_tail(&create->list, &kthread_create_list);
453 	spin_unlock(&kthread_create_lock);
454 
455 	wake_up_process(kthreadd_task);
456 	/*
457 	 * Wait for completion in killable state, for I might be chosen by
458 	 * the OOM killer while kthreadd is trying to allocate memory for
459 	 * new kernel thread.
460 	 */
461 	if (unlikely(wait_for_completion_killable(&done))) {
462 		/*
463 		 * If I was killed by a fatal signal before kthreadd (or new
464 		 * kernel thread) calls complete(), leave the cleanup of this
465 		 * structure to that thread.
466 		 */
467 		if (xchg(&create->done, NULL))
468 			return ERR_PTR(-EINTR);
469 		/*
470 		 * kthreadd (or new kernel thread) will call complete()
471 		 * shortly.
472 		 */
473 		wait_for_completion(&done);
474 	}
475 	task = create->result;
476 free_create:
477 	kfree(create);
478 	return task;
479 }
480 
481 /**
482  * kthread_create_on_node - create a kthread.
483  * @threadfn: the function to run until signal_pending(current).
484  * @data: data ptr for @threadfn.
485  * @node: task and thread structures for the thread are allocated on this node
486  * @namefmt: printf-style name for the thread.
487  *
488  * Description: This helper function creates and names a kernel
489  * thread.  The thread will be stopped: use wake_up_process() to start
490  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
491  * is affine to all CPUs.
492  *
493  * If the thread is going to be bound to a particular CPU, give that CPU's node
494  * in @node to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
495  * When woken, the thread will run @threadfn() with @data as its
496  * argument. @threadfn() can either return directly if it is a
497  * standalone thread for which no one will call kthread_stop(), or
498  * return when 'kthread_should_stop()' is true (which means
499  * kthread_stop() has been called).  The return value should be zero
500  * or a negative error number; it will be passed to kthread_stop().
501  *
502  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
503  */
504 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
505 					   void *data, int node,
506 					   const char namefmt[],
507 					   ...)
508 {
509 	struct task_struct *task;
510 	va_list args;
511 
512 	va_start(args, namefmt);
513 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
514 	va_end(args);
515 
516 	return task;
517 }
518 EXPORT_SYMBOL(kthread_create_on_node);
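
/*
 * A minimal usage sketch: create a kthread with kthread_create_on_node(),
 * start it with wake_up_process() and remember the task for a later
 * kthread_stop().  example_threadfn and the other example_* identifiers are
 * hypothetical (see the earlier sketch).
 */
static int example_start_thread(void *cookie, struct task_struct **out)
{
	struct task_struct *task;

	task = kthread_create_on_node(example_threadfn, cookie, NUMA_NO_NODE,
				      "example/%d", 0);
	if (IS_ERR(task))
		return PTR_ERR(task);

	wake_up_process(task);		/* the thread is created stopped */
	*out = task;			/* later: kthread_stop(task) */
	return 0;
}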
519 
520 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
521 {
522 	unsigned long flags;
523 
524 	if (!wait_task_inactive(p, state)) {
525 		WARN_ON(1);
526 		return;
527 	}
528 
529 	/* It's safe because the task is inactive. */
530 	raw_spin_lock_irqsave(&p->pi_lock, flags);
531 	do_set_cpus_allowed(p, mask);
532 	p->flags |= PF_NO_SETAFFINITY;
533 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
534 }
535 
536 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
537 {
538 	__kthread_bind_mask(p, cpumask_of(cpu), state);
539 }
540 
541 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
542 {
543 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
544 }
545 
546 /**
547  * kthread_bind - bind a just-created kthread to a cpu.
548  * @p: thread created by kthread_create().
549  * @cpu: cpu (might not be online, must be possible) for @p to run on.
550  *
551  * Description: This function is equivalent to set_cpus_allowed(),
552  * except that @cpu doesn't need to be online, and the thread must be
553  * stopped (i.e., just returned from kthread_create()).
554  */
555 void kthread_bind(struct task_struct *p, unsigned int cpu)
556 {
557 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
558 }
559 EXPORT_SYMBOL(kthread_bind);
560 
561 /**
562  * kthread_create_on_cpu - Create a cpu bound kthread
563  * @threadfn: the function to run until signal_pending(current).
564  * @data: data ptr for @threadfn.
565  * @cpu: The CPU on which the thread should be bound.
566  * @namefmt: printf-style name for the thread. Format is restricted
567  *	     to "name.*%u". Code fills in cpu number.
568  *
569  * Description: This helper function creates and names a kernel thread
570  */
571 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
572 					  void *data, unsigned int cpu,
573 					  const char *namefmt)
574 {
575 	struct task_struct *p;
576 
577 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
578 				   cpu);
579 	if (IS_ERR(p))
580 		return p;
581 	kthread_bind(p, cpu);
582 	/* CPU hotplug needs to bind once again when unparking the thread. */
583 	to_kthread(p)->cpu = cpu;
584 	return p;
585 }
586 EXPORT_SYMBOL(kthread_create_on_cpu);
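
/*
 * A minimal usage sketch: a CPU-bound kthread created with
 * kthread_create_on_cpu().  The "%u" in the name format is filled in with the
 * CPU number by the helper; the example_* identifiers are hypothetical.
 */
static struct task_struct *example_start_percpu_thread(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create_on_cpu(example_parkable_threadfn, NULL, cpu,
				     "example_percpu/%u");
	if (!IS_ERR(task))
		wake_up_process(task);	/* created stopped, like kthread_create() */
	return task;
}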
587 
588 void kthread_set_per_cpu(struct task_struct *k, int cpu)
589 {
590 	struct kthread *kthread = to_kthread(k);
591 	if (!kthread)
592 		return;
593 
594 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
595 
596 	if (cpu < 0) {
597 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
598 		return;
599 	}
600 
601 	kthread->cpu = cpu;
602 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
603 }
604 
605 bool kthread_is_per_cpu(struct task_struct *p)
606 {
607 	struct kthread *kthread = __to_kthread(p);
608 	if (!kthread)
609 		return false;
610 
611 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
612 }
613 
614 /**
615  * kthread_unpark - unpark a thread created by kthread_create().
616  * @k:		thread created by kthread_create().
617  *
618  * Sets kthread_should_park() for @k to return false, wakes it, and
619  * waits for it to return. If the thread is marked percpu then it is
620  * bound to the cpu again.
621  */
622 void kthread_unpark(struct task_struct *k)
623 {
624 	struct kthread *kthread = to_kthread(k);
625 
626 	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
627 		return;
628 	/*
629 	 * Newly created kthread was parked when the CPU was offline.
630 	 * The binding was lost and we need to set it again.
631 	 */
632 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
633 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
634 
635 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
636 	/*
637 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
638 	 */
639 	wake_up_state(k, TASK_PARKED);
640 }
641 EXPORT_SYMBOL_GPL(kthread_unpark);
642 
643 /**
644  * kthread_park - park a thread created by kthread_create().
645  * @k: thread created by kthread_create().
646  *
647  * Sets kthread_should_park() for @k to return true, wakes it, and
648  * waits for it to return. This can also be called after kthread_create()
649  * instead of calling wake_up_process(): the thread will park without
650  * calling threadfn().
651  *
652  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
653  * If called by the kthread itself just the park bit is set.
654  */
655 int kthread_park(struct task_struct *k)
656 {
657 	struct kthread *kthread = to_kthread(k);
658 
659 	if (WARN_ON(k->flags & PF_EXITING))
660 		return -ENOSYS;
661 
662 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
663 		return -EBUSY;
664 
665 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
666 	if (k != current) {
667 		wake_up_process(k);
668 		/*
669 		 * Wait for __kthread_parkme() to complete(), this means we
670 		 * _will_ have TASK_PARKED and are about to call schedule().
671 		 */
672 		wait_for_completion(&kthread->parked);
673 		/*
674 		 * Now wait for that schedule() to complete and the task to
675 		 * get scheduled out.
676 		 */
677 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
678 	}
679 
680 	return 0;
681 }
682 EXPORT_SYMBOL_GPL(kthread_park);
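
/*
 * A minimal usage sketch: park a thread around a critical reconfiguration and
 * unpark it afterwards, the way CPU-hotplug style users do.  @example_task is
 * a hypothetical task running a thread function that calls kthread_parkme().
 */
static int example_reconfigure(struct task_struct *example_task)
{
	int ret;

	ret = kthread_park(example_task);
	if (ret)
		return ret;	/* -ENOSYS if the thread already exited */

	/* The thread is now blocked in TASK_PARKED; reconfigure safely here. */

	kthread_unpark(example_task);
	return 0;
}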
683 
684 /**
685  * kthread_stop - stop a thread created by kthread_create().
686  * @k: thread created by kthread_create().
687  *
688  * Sets kthread_should_stop() for @k to return true, wakes it, and
689  * waits for it to exit. This can also be called after kthread_create()
690  * instead of calling wake_up_process(): the thread will exit without
691  * calling threadfn().
692  *
693  * If threadfn() may call kthread_exit() itself, the caller must ensure
694  * task_struct can't go away.
695  *
696  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
697  * was never called.
698  */
699 int kthread_stop(struct task_struct *k)
700 {
701 	struct kthread *kthread;
702 	int ret;
703 
704 	trace_sched_kthread_stop(k);
705 
706 	get_task_struct(k);
707 	kthread = to_kthread(k);
708 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
709 	kthread_unpark(k);
710 	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
711 	wake_up_process(k);
712 	wait_for_completion(&kthread->exited);
713 	ret = kthread->result;
714 	put_task_struct(k);
715 
716 	trace_sched_kthread_stop_ret(ret);
717 	return ret;
718 }
719 EXPORT_SYMBOL(kthread_stop);
720 
721 /**
722  * kthread_stop_put - stop a thread and put its task struct
723  * @k: thread created by kthread_create().
724  *
725  * Stops a thread created by kthread_create() and puts its task_struct.
726  * Only use when holding an extra task struct reference obtained by
727  * calling get_task_struct().
728  */
729 int kthread_stop_put(struct task_struct *k)
730 {
731 	int ret;
732 
733 	ret = kthread_stop(k);
734 	put_task_struct(k);
735 	return ret;
736 }
737 EXPORT_SYMBOL(kthread_stop_put);
738 
739 int kthreadd(void *unused)
740 {
741 	static const char comm[TASK_COMM_LEN] = "kthreadd";
742 	struct task_struct *tsk = current;
743 
744 	/* Setup a clean context for our children to inherit. */
745 	set_task_comm(tsk, comm);
746 	ignore_signals(tsk);
747 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
748 	set_mems_allowed(node_states[N_MEMORY]);
749 
750 	current->flags |= PF_NOFREEZE;
751 	cgroup_init_kthreadd();
752 
753 	for (;;) {
754 		set_current_state(TASK_INTERRUPTIBLE);
755 		if (list_empty(&kthread_create_list))
756 			schedule();
757 		__set_current_state(TASK_RUNNING);
758 
759 		spin_lock(&kthread_create_lock);
760 		while (!list_empty(&kthread_create_list)) {
761 			struct kthread_create_info *create;
762 
763 			create = list_entry(kthread_create_list.next,
764 					    struct kthread_create_info, list);
765 			list_del_init(&create->list);
766 			spin_unlock(&kthread_create_lock);
767 
768 			create_kthread(create);
769 
770 			spin_lock(&kthread_create_lock);
771 		}
772 		spin_unlock(&kthread_create_lock);
773 	}
774 
775 	return 0;
776 }
777 
778 void __kthread_init_worker(struct kthread_worker *worker,
779 				const char *name,
780 				struct lock_class_key *key)
781 {
782 	memset(worker, 0, sizeof(struct kthread_worker));
783 	raw_spin_lock_init(&worker->lock);
784 	lockdep_set_class_and_name(&worker->lock, key, name);
785 	INIT_LIST_HEAD(&worker->work_list);
786 	INIT_LIST_HEAD(&worker->delayed_work_list);
787 }
788 EXPORT_SYMBOL_GPL(__kthread_init_worker);
789 
790 /**
791  * kthread_worker_fn - kthread function to process kthread_worker
792  * @worker_ptr: pointer to initialized kthread_worker
793  *
794  * This function implements the main cycle of kthread worker. It processes
795  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
796  * is empty.
797  *
798  * The works must not hold any locks or leave preemption or interrupts disabled
799  * when they finish. A safe point for freezing is defined after one work
800  * finishes and before a new one is started.
801  *
802  * Also the works must not be handled by more than one worker at the same time,
803  * see also kthread_queue_work().
804  */
805 int kthread_worker_fn(void *worker_ptr)
806 {
807 	struct kthread_worker *worker = worker_ptr;
808 	struct kthread_work *work;
809 
810 	/*
811 	 * FIXME: Update the check and remove the assignment when all kthread
812 	 * worker users are created using kthread_create_worker*() functions.
813 	 */
814 	WARN_ON(worker->task && worker->task != current);
815 	worker->task = current;
816 
817 	if (worker->flags & KTW_FREEZABLE)
818 		set_freezable();
819 
820 repeat:
821 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
822 
823 	if (kthread_should_stop()) {
824 		__set_current_state(TASK_RUNNING);
825 		raw_spin_lock_irq(&worker->lock);
826 		worker->task = NULL;
827 		raw_spin_unlock_irq(&worker->lock);
828 		return 0;
829 	}
830 
831 	work = NULL;
832 	raw_spin_lock_irq(&worker->lock);
833 	if (!list_empty(&worker->work_list)) {
834 		work = list_first_entry(&worker->work_list,
835 					struct kthread_work, node);
836 		list_del_init(&work->node);
837 	}
838 	worker->current_work = work;
839 	raw_spin_unlock_irq(&worker->lock);
840 
841 	if (work) {
842 		kthread_work_func_t func = work->func;
843 		__set_current_state(TASK_RUNNING);
844 		trace_sched_kthread_work_execute_start(work);
845 		work->func(work);
846 		/*
847 		 * Avoid dereferencing work after this point.  The trace
848 		 * event only cares about the address.
849 		 */
850 		trace_sched_kthread_work_execute_end(work, func);
851 	} else if (!freezing(current)) {
852 		schedule();
853 	} else {
854 		/*
855 		 * Handle the case where the current remains
856 		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
857 		 * the current to be TASK_RUNNING.
858 		 */
859 		__set_current_state(TASK_RUNNING);
860 	}
861 
862 	try_to_freeze();
863 	cond_resched();
864 	goto repeat;
865 }
866 EXPORT_SYMBOL_GPL(kthread_worker_fn);
867 
868 static __printf(3, 0) struct kthread_worker *
869 __kthread_create_worker(int cpu, unsigned int flags,
870 			const char namefmt[], va_list args)
871 {
872 	struct kthread_worker *worker;
873 	struct task_struct *task;
874 	int node = NUMA_NO_NODE;
875 
876 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
877 	if (!worker)
878 		return ERR_PTR(-ENOMEM);
879 
880 	kthread_init_worker(worker);
881 
882 	if (cpu >= 0)
883 		node = cpu_to_node(cpu);
884 
885 	task = __kthread_create_on_node(kthread_worker_fn, worker,
886 						node, namefmt, args);
887 	if (IS_ERR(task))
888 		goto fail_task;
889 
890 	if (cpu >= 0)
891 		kthread_bind(task, cpu);
892 
893 	worker->flags = flags;
894 	worker->task = task;
895 	wake_up_process(task);
896 	return worker;
897 
898 fail_task:
899 	kfree(worker);
900 	return ERR_CAST(task);
901 }
902 
903 /**
904  * kthread_create_worker - create a kthread worker
905  * @flags: flags modifying the default behavior of the worker
906  * @namefmt: printf-style name for the kthread worker (task).
907  *
908  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
909  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
910  * when the caller was killed by a fatal signal.
911  */
912 struct kthread_worker *
913 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
914 {
915 	struct kthread_worker *worker;
916 	va_list args;
917 
918 	va_start(args, namefmt);
919 	worker = __kthread_create_worker(-1, flags, namefmt, args);
920 	va_end(args);
921 
922 	return worker;
923 }
924 EXPORT_SYMBOL(kthread_create_worker);
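
/*
 * A minimal usage sketch: create a dedicated kthread worker.  The flags
 * argument may be 0 or e.g. KTW_FREEZABLE; the example_* identifiers are
 * hypothetical.
 */
static struct kthread_worker *example_worker;

static int example_setup_worker(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	example_worker = worker;
	return 0;
}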
925 
926 /**
927  * kthread_create_worker_on_cpu - create a kthread worker and bind it
928  *	to a given CPU and the associated NUMA node.
929  * @cpu: CPU number
930  * @flags: flags modifying the default behavior of the worker
931  * @namefmt: printf-style name for the kthread worker (task).
932  *
933  * Use a valid CPU number if you want to bind the kthread worker
934  * to the given CPU and the associated NUMA node.
935  *
936  * It is good practice to also include the CPU number in the worker name.
937  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
938  *
939  * CPU hotplug:
940  * The kthread worker API is simple and generic. It just provides a way
941  * to create, use, and destroy workers.
942  *
943  * It is up to the API user how to handle CPU hotplug. They have to decide
944  * how to handle pending work items, prevent queuing new ones, and
945  * restore the functionality when the CPU goes off and on. There are a
946  * few catches:
947  *
948  *    - The CPU affinity gets lost when the worker is scheduled on an offline CPU.
949  *
950  *    - The worker might not exist if the CPU was offline when the user
951  *      created the workers.
952  *
953  * Good practice is to implement two CPU hotplug callbacks and to
954  * destroy/create the worker when the CPU goes down/up.
955  *
956  * Return:
957  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
958  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
959  * when the caller was killed by a fatal signal.
960  */
961 struct kthread_worker *
962 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
963 			     const char namefmt[], ...)
964 {
965 	struct kthread_worker *worker;
966 	va_list args;
967 
968 	va_start(args, namefmt);
969 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
970 	va_end(args);
971 
972 	return worker;
973 }
974 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
975 
976 /*
977  * Returns true when the work could not be queued at the moment.
978  * It happens when it is already pending in a worker list
979  * or when it is being cancelled.
980  */
981 static inline bool queuing_blocked(struct kthread_worker *worker,
982 				   struct kthread_work *work)
983 {
984 	lockdep_assert_held(&worker->lock);
985 
986 	return !list_empty(&work->node) || work->canceling;
987 }
988 
989 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
990 					     struct kthread_work *work)
991 {
992 	lockdep_assert_held(&worker->lock);
993 	WARN_ON_ONCE(!list_empty(&work->node));
994 	/* Do not use a work with >1 worker, see kthread_queue_work() */
995 	WARN_ON_ONCE(work->worker && work->worker != worker);
996 }
997 
998 /* insert @work before @pos in @worker */
999 static void kthread_insert_work(struct kthread_worker *worker,
1000 				struct kthread_work *work,
1001 				struct list_head *pos)
1002 {
1003 	kthread_insert_work_sanity_check(worker, work);
1004 
1005 	trace_sched_kthread_work_queue_work(worker, work);
1006 
1007 	list_add_tail(&work->node, pos);
1008 	work->worker = worker;
1009 	if (!worker->current_work && likely(worker->task))
1010 		wake_up_process(worker->task);
1011 }
1012 
1013 /**
1014  * kthread_queue_work - queue a kthread_work
1015  * @worker: target kthread_worker
1016  * @work: kthread_work to queue
1017  *
1018  * Queue @work on the work processor @worker for async execution.  @worker
1019  * must have been created with kthread_create_worker().  Returns %true
1020  * if @work was successfully queued, %false if it was already pending.
1021  *
1022  * Reinitialize the work if it needs to be used by another worker.
1023  * For example, when the worker was stopped and started again.
1024  */
1025 bool kthread_queue_work(struct kthread_worker *worker,
1026 			struct kthread_work *work)
1027 {
1028 	bool ret = false;
1029 	unsigned long flags;
1030 
1031 	raw_spin_lock_irqsave(&worker->lock, flags);
1032 	if (!queuing_blocked(worker, work)) {
1033 		kthread_insert_work(worker, work, &worker->work_list);
1034 		ret = true;
1035 	}
1036 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1037 	return ret;
1038 }
1039 EXPORT_SYMBOL_GPL(kthread_queue_work);
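
/*
 * A minimal usage sketch: embed a kthread_work in a larger object, initialize
 * it, queue it on the hypothetical example_worker from the previous sketch,
 * and wait for it with kthread_flush_work().
 */
struct example_item {
	struct kthread_work work;
	int payload;
};

static void example_work_fn(struct kthread_work *work)
{
	struct example_item *item = container_of(work, struct example_item, work);

	pr_info("processing payload %d\n", item->payload);
}

static void example_submit_and_wait(struct example_item *item)
{
	kthread_init_work(&item->work, example_work_fn);
	if (kthread_queue_work(example_worker, &item->work))
		kthread_flush_work(&item->work);	/* wait for completion */
}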
1040 
1041 /**
1042  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1043  *	delayed work when the timer expires.
1044  * @t: pointer to the expired timer
1045  *
1046  * The format of the function is defined by struct timer_list.
1047  * It should be called from an irqsafe timer with irqs already off.
1048  */
1049 void kthread_delayed_work_timer_fn(struct timer_list *t)
1050 {
1051 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1052 	struct kthread_work *work = &dwork->work;
1053 	struct kthread_worker *worker = work->worker;
1054 	unsigned long flags;
1055 
1056 	/*
1057 	 * This might happen when a pending work is reinitialized.
1058 	 * It means that it is being used in a wrong way.
1059 	 */
1060 	if (WARN_ON_ONCE(!worker))
1061 		return;
1062 
1063 	raw_spin_lock_irqsave(&worker->lock, flags);
1064 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1065 	WARN_ON_ONCE(work->worker != worker);
1066 
1067 	/* Move the work from worker->delayed_work_list. */
1068 	WARN_ON_ONCE(list_empty(&work->node));
1069 	list_del_init(&work->node);
1070 	if (!work->canceling)
1071 		kthread_insert_work(worker, work, &worker->work_list);
1072 
1073 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1074 }
1075 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1076 
1077 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1078 					 struct kthread_delayed_work *dwork,
1079 					 unsigned long delay)
1080 {
1081 	struct timer_list *timer = &dwork->timer;
1082 	struct kthread_work *work = &dwork->work;
1083 
1084 	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1085 
1086 	/*
1087 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1088 	 * both optimization and correctness.  The earliest @timer can
1089 	 * expire is on the closest next tick and delayed_work users depend
1090  * on there being no such delay when @delay is 0.
1091 	 */
1092 	if (!delay) {
1093 		kthread_insert_work(worker, work, &worker->work_list);
1094 		return;
1095 	}
1096 
1097 	/* Be paranoid and try to detect possible races already now. */
1098 	kthread_insert_work_sanity_check(worker, work);
1099 
1100 	list_add(&work->node, &worker->delayed_work_list);
1101 	work->worker = worker;
1102 	timer->expires = jiffies + delay;
1103 	add_timer(timer);
1104 }
1105 
1106 /**
1107  * kthread_queue_delayed_work - queue the associated kthread work
1108  *	after a delay.
1109  * @worker: target kthread_worker
1110  * @dwork: kthread_delayed_work to queue
1111  * @delay: number of jiffies to wait before queuing
1112  *
1113  * If the work has not been pending it starts a timer that will queue
1114  * the work after the given @delay. If @delay is zero, it queues the
1115  * work immediately.
1116  *
1117  * Return: %false if the @work was already pending, meaning that either
1118  * the timer was running or the work was queued. It returns %true
1119  * otherwise.
1120  */
1121 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1122 				struct kthread_delayed_work *dwork,
1123 				unsigned long delay)
1124 {
1125 	struct kthread_work *work = &dwork->work;
1126 	unsigned long flags;
1127 	bool ret = false;
1128 
1129 	raw_spin_lock_irqsave(&worker->lock, flags);
1130 
1131 	if (!queuing_blocked(worker, work)) {
1132 		__kthread_queue_delayed_work(worker, dwork, delay);
1133 		ret = true;
1134 	}
1135 
1136 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1137 	return ret;
1138 }
1139 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
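
/*
 * A minimal usage sketch: a delayed work item queued on the hypothetical
 * example_worker so that it runs roughly one second from now.
 */
static struct kthread_delayed_work example_dwork;

static void example_dwork_fn(struct kthread_work *work)
{
	pr_info("delayed work ran\n");
}

static void example_submit_delayed(void)
{
	kthread_init_delayed_work(&example_dwork, example_dwork_fn);
	/* A %false return here would mean the work was already pending. */
	kthread_queue_delayed_work(example_worker, &example_dwork, HZ);
}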
1140 
1141 struct kthread_flush_work {
1142 	struct kthread_work	work;
1143 	struct completion	done;
1144 };
1145 
1146 static void kthread_flush_work_fn(struct kthread_work *work)
1147 {
1148 	struct kthread_flush_work *fwork =
1149 		container_of(work, struct kthread_flush_work, work);
1150 	complete(&fwork->done);
1151 }
1152 
1153 /**
1154  * kthread_flush_work - flush a kthread_work
1155  * @work: work to flush
1156  *
1157  * If @work is queued or executing, wait for it to finish execution.
1158  */
1159 void kthread_flush_work(struct kthread_work *work)
1160 {
1161 	struct kthread_flush_work fwork = {
1162 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1163 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1164 	};
1165 	struct kthread_worker *worker;
1166 	bool noop = false;
1167 
1168 	worker = work->worker;
1169 	if (!worker)
1170 		return;
1171 
1172 	raw_spin_lock_irq(&worker->lock);
1173 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1174 	WARN_ON_ONCE(work->worker != worker);
1175 
1176 	if (!list_empty(&work->node))
1177 		kthread_insert_work(worker, &fwork.work, work->node.next);
1178 	else if (worker->current_work == work)
1179 		kthread_insert_work(worker, &fwork.work,
1180 				    worker->work_list.next);
1181 	else
1182 		noop = true;
1183 
1184 	raw_spin_unlock_irq(&worker->lock);
1185 
1186 	if (!noop)
1187 		wait_for_completion(&fwork.done);
1188 }
1189 EXPORT_SYMBOL_GPL(kthread_flush_work);
1190 
1191 /*
1192  * Make sure that the timer is neither set nor running and cannot
1193  * manipulate the work list_head any longer.
1194  *
1195  * The function is called under worker->lock. The lock is temporarily
1196  * released but the timer can't be set again in the meantime.
1197  */
1198 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1199 					      unsigned long *flags)
1200 {
1201 	struct kthread_delayed_work *dwork =
1202 		container_of(work, struct kthread_delayed_work, work);
1203 	struct kthread_worker *worker = work->worker;
1204 
1205 	/*
1206 	 * del_timer_sync() must be called to make sure that the timer
1207 	 * callback is not running. The lock must be temporarily released
1208 	 * to avoid a deadlock with the callback. In the meantime,
1209 	 * any queuing is blocked by setting the canceling counter.
1210 	 */
1211 	work->canceling++;
1212 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1213 	del_timer_sync(&dwork->timer);
1214 	raw_spin_lock_irqsave(&worker->lock, *flags);
1215 	work->canceling--;
1216 }
1217 
1218 /*
1219  * This function removes the work from the worker queue.
1220  *
1221  * It is called under worker->lock. The caller must make sure that
1222  * the timer used by delayed work is not running, e.g. by calling
1223  * kthread_cancel_delayed_work_timer().
1224  *
1225  * The work might still be in use when this function finishes. See the
1226  * current_work processed by the worker.
1227  *
1228  * Return: %true if @work was pending and successfully canceled,
1229  *	%false if @work was not pending
1230  */
1231 static bool __kthread_cancel_work(struct kthread_work *work)
1232 {
1233 	/*
1234 	 * Try to remove the work from a worker list. It might either
1235 	 * be from worker->work_list or from worker->delayed_work_list.
1236 	 */
1237 	if (!list_empty(&work->node)) {
1238 		list_del_init(&work->node);
1239 		return true;
1240 	}
1241 
1242 	return false;
1243 }
1244 
1245 /**
1246  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1247  * @worker: kthread worker to use
1248  * @dwork: kthread delayed work to queue
1249  * @delay: number of jiffies to wait before queuing
1250  *
1251  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1252  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1253  * @work is guaranteed to be queued immediately.
1254  *
1255  * Return: %false if @dwork was idle and queued, %true otherwise.
1256  *
1257  * A special case is when the work is being canceled in parallel.
1258  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1259  * or yet another kthread_mod_delayed_work() call. We let the other command
1260  * win and return %true here. The return value can be used for reference
1261  * counting and the number of queued works stays the same. Anyway, the caller
1262  * is supposed to synchronize these operations a reasonable way.
1263  *
1264  * This function is safe to call from any context including IRQ handler.
1265  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1266  * for details.
1267  */
1268 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1269 			      struct kthread_delayed_work *dwork,
1270 			      unsigned long delay)
1271 {
1272 	struct kthread_work *work = &dwork->work;
1273 	unsigned long flags;
1274 	int ret;
1275 
1276 	raw_spin_lock_irqsave(&worker->lock, flags);
1277 
1278 	/* Do not bother with canceling when never queued. */
1279 	if (!work->worker) {
1280 		ret = false;
1281 		goto fast_queue;
1282 	}
1283 
1284 	/* Work must not be used with >1 worker, see kthread_queue_work() */
1285 	WARN_ON_ONCE(work->worker != worker);
1286 
1287 	/*
1288 	 * Temporarily cancel the work but do not fight with another command
1289 	 * that is canceling the work as well.
1290 	 *
1291 	 * It is a bit tricky because of possible races with another
1292 	 * mod_delayed_work() and cancel_delayed_work() callers.
1293 	 *
1294 	 * The timer must be canceled first because worker->lock is released
1295 	 * when doing so. But the work can be removed from the queue (list)
1296 	 * only when it can be queued again so that the return value can
1297 	 * be used for reference counting.
1298 	 */
1299 	kthread_cancel_delayed_work_timer(work, &flags);
1300 	if (work->canceling) {
1301 		/* The number of works in the queue does not change. */
1302 		ret = true;
1303 		goto out;
1304 	}
1305 	ret = __kthread_cancel_work(work);
1306 
1307 fast_queue:
1308 	__kthread_queue_delayed_work(worker, dwork, delay);
1309 out:
1310 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1311 	return ret;
1312 }
1313 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
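
/*
 * A minimal usage sketch: use kthread_mod_delayed_work() to push back a
 * watchdog-style timeout whenever activity is seen.  Relies on the
 * hypothetical example_worker and example_dwork from the sketches above.
 */
static void example_kick_watchdog(void)
{
	/* Re-arm to fire 5 seconds from now, whether or not it was pending. */
	kthread_mod_delayed_work(example_worker, &example_dwork, 5 * HZ);
}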
1314 
1315 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1316 {
1317 	struct kthread_worker *worker = work->worker;
1318 	unsigned long flags;
1319 	int ret = false;
1320 
1321 	if (!worker)
1322 		goto out;
1323 
1324 	raw_spin_lock_irqsave(&worker->lock, flags);
1325 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1326 	WARN_ON_ONCE(work->worker != worker);
1327 
1328 	if (is_dwork)
1329 		kthread_cancel_delayed_work_timer(work, &flags);
1330 
1331 	ret = __kthread_cancel_work(work);
1332 
1333 	if (worker->current_work != work)
1334 		goto out_fast;
1335 
1336 	/*
1337 	 * The work is in progress and we need to wait with the lock released.
1338 	 * In the meantime, block any queuing by setting the canceling counter.
1339 	 */
1340 	work->canceling++;
1341 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1342 	kthread_flush_work(work);
1343 	raw_spin_lock_irqsave(&worker->lock, flags);
1344 	work->canceling--;
1345 
1346 out_fast:
1347 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1348 out:
1349 	return ret;
1350 }
1351 
1352 /**
1353  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1354  * @work: the kthread work to cancel
1355  *
1356  * Cancel @work and wait for its execution to finish.  This function
1357  * can be used even if the work re-queues itself. On return from this
1358  * function, @work is guaranteed to be not pending or executing on any CPU.
1359  *
1360  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1361  * delayed works. Use kthread_cancel_delayed_work_sync() instead.
1362  *
1363  * The caller must ensure that the worker on which @work was last
1364  * queued can't be destroyed before this function returns.
1365  *
1366  * Return: %true if @work was pending, %false otherwise.
1367  */
1368 bool kthread_cancel_work_sync(struct kthread_work *work)
1369 {
1370 	return __kthread_cancel_work_sync(work, false);
1371 }
1372 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1373 
1374 /**
1375  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1376  *	wait for it to finish.
1377  * @dwork: the kthread delayed work to cancel
1378  *
1379  * This is kthread_cancel_work_sync() for delayed works.
1380  *
1381  * Return: %true if @dwork was pending, %false otherwise.
1382  */
1383 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1384 {
1385 	return __kthread_cancel_work_sync(&dwork->work, true);
1386 }
1387 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1388 
1389 /**
1390  * kthread_flush_worker - flush all current works on a kthread_worker
1391  * @worker: worker to flush
1392  *
1393  * Wait until all currently executing or pending works on @worker are
1394  * finished.
1395  */
1396 void kthread_flush_worker(struct kthread_worker *worker)
1397 {
1398 	struct kthread_flush_work fwork = {
1399 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1400 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1401 	};
1402 
1403 	kthread_queue_work(worker, &fwork.work);
1404 	wait_for_completion(&fwork.done);
1405 }
1406 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1407 
1408 /**
1409  * kthread_destroy_worker - destroy a kthread worker
1410  * @worker: worker to be destroyed
1411  *
1412  * Flush and destroy @worker.  The simple flush is enough because the kthread
1413  * worker API is used only in trivial scenarios.  There are no multi-step state
1414  * machines needed.
1415  *
1416  * Note that this function is not responsible for handling delayed work, so
1417  * the caller is responsible for queuing or canceling all delayed work items
1418  * before invoking this function.
1419  */
1420 void kthread_destroy_worker(struct kthread_worker *worker)
1421 {
1422 	struct task_struct *task;
1423 
1424 	task = worker->task;
1425 	if (WARN_ON(!task))
1426 		return;
1427 
1428 	kthread_flush_worker(worker);
1429 	kthread_stop(task);
1430 	WARN_ON(!list_empty(&worker->delayed_work_list));
1431 	WARN_ON(!list_empty(&worker->work_list));
1432 	kfree(worker);
1433 }
1434 EXPORT_SYMBOL(kthread_destroy_worker);
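
/*
 * A minimal usage sketch: tear the hypothetical example_worker down.  Delayed
 * work is canceled first because kthread_destroy_worker() does not handle it;
 * the destroy itself flushes and stops the worker task.
 */
static void example_teardown_worker(void)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(example_worker);
	example_worker = NULL;
}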
1435 
1436 /**
1437  * kthread_use_mm - make the calling kthread operate on an address space
1438  * @mm: address space to operate on
1439  */
1440 void kthread_use_mm(struct mm_struct *mm)
1441 {
1442 	struct mm_struct *active_mm;
1443 	struct task_struct *tsk = current;
1444 
1445 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1446 	WARN_ON_ONCE(tsk->mm);
1447 
1448 	/*
1449 	 * It is possible for mm to be the same as tsk->active_mm, but
1450 	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1451 	 * because these references are not equivalent.
1452 	 */
1453 	mmgrab(mm);
1454 
1455 	task_lock(tsk);
1456 	/* Hold off tlb flush IPIs while switching mm's */
1457 	local_irq_disable();
1458 	active_mm = tsk->active_mm;
1459 	tsk->active_mm = mm;
1460 	tsk->mm = mm;
1461 	membarrier_update_current_mm(mm);
1462 	switch_mm_irqs_off(active_mm, mm, tsk);
1463 	local_irq_enable();
1464 	task_unlock(tsk);
1465 #ifdef finish_arch_post_lock_switch
1466 	finish_arch_post_lock_switch();
1467 #endif
1468 
1469 	/*
1470 	 * When a kthread starts operating on an address space, the loop
1471 	 * in membarrier_{private,global}_expedited() may not observe
1472 	 * tsk->mm and thus not issue an IPI. Membarrier requires a
1473 	 * memory barrier after storing to tsk->mm, before accessing
1474 	 * user-space memory. A full memory barrier for membarrier
1475 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1476 	 * mmdrop_lazy_tlb().
1477 	 */
1478 	mmdrop_lazy_tlb(active_mm);
1479 }
1480 EXPORT_SYMBOL_GPL(kthread_use_mm);
1481 
1482 /**
1483  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1484  * @mm: address space to operate on
1485  */
1486 void kthread_unuse_mm(struct mm_struct *mm)
1487 {
1488 	struct task_struct *tsk = current;
1489 
1490 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1491 	WARN_ON_ONCE(!tsk->mm);
1492 
1493 	task_lock(tsk);
1494 	/*
1495 	 * When a kthread stops operating on an address space, the loop
1496 	 * in membarrier_{private,global}_expedited() may not observe
1497 	 * tsk->mm and thus not issue an IPI. Membarrier requires a
1498 	 * memory barrier after accessing user-space memory, before
1499 	 * clearing tsk->mm.
1500 	 */
1501 	smp_mb__after_spinlock();
1502 	local_irq_disable();
1503 	tsk->mm = NULL;
1504 	membarrier_update_current_mm(NULL);
1505 	mmgrab_lazy_tlb(mm);
1506 	/* active_mm is still 'mm' */
1507 	enter_lazy_tlb(mm, tsk);
1508 	local_irq_enable();
1509 	task_unlock(tsk);
1510 
1511 	mmdrop(mm);
1512 }
1513 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
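
/*
 * A minimal usage sketch: a kthread temporarily adopting a user address space
 * so it can access that task's memory.  Taking the reference via
 * get_task_mm()/mmput() is one possible pattern; @owner and the surrounding
 * context are hypothetical, and the caller must be a kthread.
 */
static void example_touch_user_mm(struct task_struct *owner)
{
	struct mm_struct *mm = get_task_mm(owner);

	if (!mm)
		return;

	kthread_use_mm(mm);
	/* ... access @owner's user memory here ... */
	kthread_unuse_mm(mm);

	mmput(mm);
}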
1514 
1515 #ifdef CONFIG_BLK_CGROUP
1516 /**
1517  * kthread_associate_blkcg - associate blkcg to current kthread
1518  * @css: the cgroup info
1519  *
1520  * Current thread must be a kthread. The thread is running jobs on behalf of
1521  * other threads. In some cases, we expect the jobs to attach the cgroup info of
1522  * the original threads instead of that of the current thread. This function stores
1523  * original thread's cgroup info in current kthread context for later
1524  * retrieval.
1525  */
1526 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1527 {
1528 	struct kthread *kthread;
1529 
1530 	if (!(current->flags & PF_KTHREAD))
1531 		return;
1532 	kthread = to_kthread(current);
1533 	if (!kthread)
1534 		return;
1535 
1536 	if (kthread->blkcg_css) {
1537 		css_put(kthread->blkcg_css);
1538 		kthread->blkcg_css = NULL;
1539 	}
1540 	if (css) {
1541 		css_get(css);
1542 		kthread->blkcg_css = css;
1543 	}
1544 }
1545 EXPORT_SYMBOL(kthread_associate_blkcg);
1546 
1547 /**
1548  * kthread_blkcg - get associated blkcg css of current kthread
1549  *
1550  * Current thread must be a kthread.
1551  */
1552 struct cgroup_subsys_state *kthread_blkcg(void)
1553 {
1554 	struct kthread *kthread;
1555 
1556 	if (current->flags & PF_KTHREAD) {
1557 		kthread = to_kthread(current);
1558 		if (kthread)
1559 			return kthread->blkcg_css;
1560 	}
1561 	return NULL;
1562 }
1563 #endif
1564