// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->worker_private
 *
 * the task is both a kthread and struct kthread is persistent. However,
 * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		__get_task_comm(buf, buf_size, tsk);
		return;
	}

	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	p->vfork_done = &kthread->exited;

	p->worker_private = kthread;
	return true;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
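
/*
 * Example: the canonical loop of a stoppable kthread. This is an
 * illustrative sketch only, not part of this file; my_loop_fn() and
 * do_unit_of_work() are hypothetical. The task state is set before
 * re-checking the stop bit, so a concurrent kthread_stop() either sees
 * the thread running or wakes it from schedule():
 *
 *	static int my_loop_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_unit_of_work(data);
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			if (!kthread_should_stop())
 *				schedule();
 *			__set_current_state(TASK_RUNNING);
 *		}
 *		return 0;
 *	}
 */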

static bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

bool kthread_should_stop_or_park(void)
{
	struct kthread *kthread = __to_kthread(current);

	if (!kthread)
		return false;

	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
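
/*
 * Example: a freezable kthread built on kthread_freezable_should_stop().
 * Illustrative sketch only; my_freezable_fn() and do_unit_of_work() are
 * hypothetical. The thread marks itself freezable and uses the helper as
 * its loop condition, so it enters the refrigerator across suspend instead
 * of racing the freezer:
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			do_unit_of_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */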

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
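
/*
 * Example: a park-aware loop in the style of the smpboot threads.
 * Illustrative sketch only; my_percpu_fn() and do_unit_of_work() are
 * hypothetical. Checking kthread_should_park() before each unit of work
 * lets kthread_park() quiesce the thread at a well-defined point:
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			do_unit_of_work(data);
 *		}
 *		return 0;
 *	}
 */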

/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit() can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit() can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return @code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when caller killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* called from kernel_clone() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
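
/*
 * Example: the typical create/start/stop lifecycle from the caller's side.
 * Illustrative sketch only; my_loop_fn and my_data are hypothetical.
 * kthread_create() is shorthand for kthread_create_on_node() with
 * NUMA_NO_NODE, and kthread_run() additionally wakes the new thread:
 *
 *	struct task_struct *t;
 *	int ret;
 *
 *	t = kthread_create(my_loop_fn, my_data, "my-helper");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *
 * and later, on teardown:
 *
 *	ret = kthread_stop(t);
 */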

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in the cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu. The thread will be stopped: use wake_up_process() to
 * start it.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind the thread again when unparking it. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
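
/*
 * Example: creating a CPU-bound thread. Illustrative sketch only;
 * my_percpu_fn is hypothetical. Note that @namefmt must contain a single
 * %u, which this function fills with the CPU number:
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_cpu(my_percpu_fn, NULL, cpu, "my-helper/%u");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 */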

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If
 * the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
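
/*
 * Example: quiescing a thread around a critical reconfiguration.
 * Illustrative sketch only; reconfigure_state() is hypothetical. Once
 * kthread_park() returns 0, the thread is scheduled out in TASK_PARKED
 * until kthread_unpark() is called:
 *
 *	if (!kthread_park(t)) {
 *		reconfigure_state();
 *		kthread_unpark(t);
 *	}
 */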

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Set up a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to hold any locks or leave preemption or
 * interrupts disabled when they finish. A safe point for freezing is
 * provided when one work finishes and before a new one is started.
 *
 * Also, the works must not be handled by more than one worker at the same
 * time; see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point.  The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current task remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects the current
		 * task to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
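
/*
 * Example: a dedicated worker processing embedded work items. Illustrative
 * sketch only; struct my_ctx, my_work_fn() and process() are hypothetical:
 *
 *	struct my_ctx {
 *		struct kthread_work work;
 *		int payload;
 *	};
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		process(ctx->payload);
 *	}
 *
 *	worker = kthread_create_worker(0, "my-worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 *
 * and on teardown, after queuing has stopped:
 *
 *	kthread_destroy_worker(worker);
 */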

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity is lost when the worker is scheduled on an offline CPU.
 *
 *    - A worker might not exist if its CPU was offline when the user
 *      created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * That happens when it is already pending on a worker list
 * or when it is being canceled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irqsafe timer with irqs already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending, meaning that
 * either the timer was running or the work was queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
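
/*
 * Example: arming a delayed work item. Illustrative sketch only; worker,
 * ctx and my_work_fn() are hypothetical, and ctx must outlive the timer:
 *
 *	kthread_init_delayed_work(&ctx->dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &ctx->dwork,
 *				   msecs_to_jiffies(100));
 */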

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work, but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
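
/*
 * Example: a debounce pattern built on kthread_mod_delayed_work().
 * Illustrative sketch only; struct my_ctx and my_event() are hypothetical.
 * Every event pushes the deadline out again, so the work item runs once
 * events have been quiet for 50ms:
 *
 *	static void my_event(struct my_ctx *ctx)
 *	{
 *		kthread_mod_delayed_work(ctx->worker, &ctx->dwork,
 *					 msecs_to_jiffies(50));
 *	}
 */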

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should queue or cancel all delayed work items before invoking
 * this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->delayed_work_list));
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm has been set, and thus not issue an IPI.
	 * Membarrier requires a memory barrier after storing to tsk->mm,
	 * before accessing user-space memory. A full memory barrier for
	 * membarrier {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);
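
/*
 * Example: borrowing a userspace address space from a kthread, in the
 * style of vhost-like offload threads. Illustrative sketch only; mm is
 * assumed to be a valid mm_struct reference held by the caller, and buf,
 * uaddr and len are hypothetical:
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(buf, uaddr, len))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */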

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm has been cleared, and thus not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread. The thread runs jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif
1561