1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32
33
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37
38 static LIST_HEAD(kthread_affinity_list);
39 static DEFINE_MUTEX(kthread_affinity_lock);
40
41 struct kthread_create_info
42 {
43 /* Information passed to kthread() from kthreadd. */
44 char *full_name;
45 int (*threadfn)(void *data);
46 void *data;
47 int node;
48
49 /* Result passed back to kthread_create() from kthreadd. */
50 struct task_struct *result;
51 struct completion *done;
52
53 struct list_head list;
54 };
55
56 struct kthread {
57 unsigned long flags;
58 unsigned int cpu;
59 unsigned int node;
60 int started;
61 int result;
62 int (*threadfn)(void *);
63 void *data;
64 struct completion parked;
65 struct completion exited;
66 #ifdef CONFIG_BLK_CGROUP
67 struct cgroup_subsys_state *blkcg_css;
68 #endif
69 /* To store the full name if task comm is truncated. */
70 char *full_name;
71 struct task_struct *task;
72 struct list_head affinity_node;
73 struct cpumask *preferred_affinity;
74 };
75
76 enum KTHREAD_BITS {
77 KTHREAD_IS_PER_CPU = 0,
78 KTHREAD_SHOULD_STOP,
79 KTHREAD_SHOULD_PARK,
80 };
81
static inline struct kthread *to_kthread(struct task_struct *k)
83 {
84 WARN_ON(!(k->flags & PF_KTHREAD));
85 return k->worker_private;
86 }
87
88 /*
89 * Variant of to_kthread() that doesn't assume @p is a kthread.
90 *
91 * When "(p->flags & PF_KTHREAD)" is set the task is a kthread and will
92 * always remain a kthread. For kthreads p->worker_private always
93 * points to a struct kthread. For tasks that are not kthreads
94 * p->worker_private is used to point to other things.
95 *
96 * Return NULL for any task that is not a kthread.
97 */
static inline struct kthread *__to_kthread(struct task_struct *p)
99 {
100 void *kthread = p->worker_private;
101 if (kthread && !(p->flags & PF_KTHREAD))
102 kthread = NULL;
103 return kthread;
104 }
105
void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
107 {
108 struct kthread *kthread = to_kthread(tsk);
109
110 if (!kthread || !kthread->full_name) {
111 strscpy(buf, tsk->comm, buf_size);
112 return;
113 }
114
115 strscpy_pad(buf, kthread->full_name, buf_size);
116 }
117
bool set_kthread_struct(struct task_struct *p)
119 {
120 struct kthread *kthread;
121
122 if (WARN_ON_ONCE(to_kthread(p)))
123 return false;
124
125 kthread = kzalloc_obj(*kthread, GFP_KERNEL);
126 if (!kthread)
127 return false;
128
129 init_completion(&kthread->exited);
130 init_completion(&kthread->parked);
131 INIT_LIST_HEAD(&kthread->affinity_node);
132 p->vfork_done = &kthread->exited;
133
134 kthread->task = p;
135 kthread->node = tsk_fork_get_node(current);
136 p->worker_private = kthread;
137 return true;
138 }
139
void free_kthread_struct(struct task_struct *k)
141 {
142 struct kthread *kthread;
143
144 /*
 * Can be NULL if the allocation in set_kthread_struct() failed.
146 */
147 kthread = to_kthread(k);
148 if (!kthread)
149 return;
150
151 #ifdef CONFIG_BLK_CGROUP
152 WARN_ON_ONCE(kthread->blkcg_css);
153 #endif
154 k->worker_private = NULL;
155 kfree(kthread->full_name);
156 kfree(kthread);
157 }
158
159 /**
160 * kthread_should_stop - should this kthread return now?
161 *
162 * When someone calls kthread_stop() on your kthread, it will be woken
163 * and this will return true. You should then return, and your return
164 * value will be passed through to kthread_stop().
165 */
bool kthread_should_stop(void)
167 {
168 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
169 }
170 EXPORT_SYMBOL(kthread_should_stop);
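
/*
 * Illustrative usage sketch (not compiled as part of this file): a minimal
 * thread function honouring the stop protocol described above. The names
 * example_fn, example_arg and do_one_unit_of_work() are hypothetical; the
 * return value is what kthread_stop() reports back to the caller.
 *
 *    static int example_fn(void *example_arg)
 *    {
 *            while (!kthread_should_stop()) {
 *                    do_one_unit_of_work(example_arg);
 *                    schedule_timeout_interruptible(HZ);
 *            }
 *            return 0;
 *    }
 */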
171
static bool __kthread_should_park(struct task_struct *k)
173 {
174 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
175 }
176
177 /**
178 * kthread_should_park - should this kthread park now?
179 *
180 * When someone calls kthread_park() on your kthread, it will be woken
181 * and this will return true. You should then do the necessary
182 * cleanup and call kthread_parkme()
183 *
184 * Similar to kthread_should_stop(), but this keeps the thread alive
185 * and in a park position. kthread_unpark() "restarts" the thread and
186 * calls the thread function again.
187 */
bool kthread_should_park(void)
189 {
190 return __kthread_should_park(current);
191 }
192 EXPORT_SYMBOL_GPL(kthread_should_park);
193
bool kthread_should_stop_or_park(void)
195 {
196 struct kthread *kthread = __to_kthread(current);
197
198 if (!kthread)
199 return false;
200
201 return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
202 }
203
204 /**
205 * kthread_freezable_should_stop - should this freezable kthread return now?
206 * @was_frozen: optional out parameter, indicates whether %current was frozen
207 *
208 * kthread_should_stop() for freezable kthreads, which will enter
209 * refrigerator if necessary. This function is safe from kthread_stop() /
210 * freezer deadlock and freezable kthreads should use this function instead
211 * of calling try_to_freeze() directly.
212 */
bool kthread_freezable_should_stop(bool *was_frozen)
214 {
215 bool frozen = false;
216
217 might_sleep();
218
219 if (unlikely(freezing(current)))
220 frozen = __refrigerator(true);
221
222 if (was_frozen)
223 *was_frozen = frozen;
224
225 return kthread_should_stop();
226 }
227 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
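
/*
 * Illustrative sketch: a freezable kthread main loop built on the helper
 * above. set_freezable() is the existing freezer API; freezable_fn,
 * resync_after_freeze() and do_one_unit_of_work() are hypothetical names.
 *
 *    static int freezable_fn(void *arg)
 *    {
 *            bool was_frozen;
 *
 *            set_freezable();
 *            while (!kthread_freezable_should_stop(&was_frozen)) {
 *                    if (was_frozen)
 *                            resync_after_freeze(arg);
 *                    do_one_unit_of_work(arg);
 *                    schedule_timeout_interruptible(HZ);
 *            }
 *            return 0;
 *    }
 */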
228
229 /**
230 * kthread_func - return the function specified on kthread creation
231 * @task: kthread task in question
232 *
233 * Returns NULL if the task is not a kthread.
234 */
void *kthread_func(struct task_struct *task)
236 {
237 struct kthread *kthread = __to_kthread(task);
238 if (kthread)
239 return kthread->threadfn;
240 return NULL;
241 }
242 EXPORT_SYMBOL_GPL(kthread_func);
243
244 /**
245 * kthread_data - return data value specified on kthread creation
246 * @task: kthread task in question
247 *
248 * Return the data value specified when kthread @task was created.
249 * The caller is responsible for ensuring the validity of @task when
250 * calling this function.
251 */
void *kthread_data(struct task_struct *task)
253 {
254 return to_kthread(task)->data;
255 }
256 EXPORT_SYMBOL_GPL(kthread_data);
257
258 /**
259 * kthread_probe_data - speculative version of kthread_data()
260 * @task: possible kthread task in question
261 *
262 * @task could be a kthread task. Return the data value specified when it
263 * was created if accessible. If @task isn't a kthread task or its data is
264 * inaccessible for any reason, %NULL is returned. This function requires
265 * that @task itself is safe to dereference.
266 */
void *kthread_probe_data(struct task_struct *task)
268 {
269 struct kthread *kthread = __to_kthread(task);
270 void *data = NULL;
271
272 if (kthread)
273 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
274 return data;
275 }
276
static void __kthread_parkme(struct kthread *self)
278 {
279 for (;;) {
280 /*
281 * TASK_PARKED is a special state; we must serialize against
282 * possible pending wakeups to avoid store-store collisions on
283 * task->state.
284 *
285 * Such a collision might possibly result in the task state
 * changing from TASK_PARKED and us failing the
287 * wait_task_inactive() in kthread_park().
288 */
289 set_special_state(TASK_PARKED);
290 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
291 break;
292
293 /*
294 * Thread is going to call schedule(), do not preempt it,
295 * or the caller of kthread_park() may spend more time in
296 * wait_task_inactive().
297 */
298 preempt_disable();
299 complete(&self->parked);
300 schedule_preempt_disabled();
301 preempt_enable();
302 }
303 __set_current_state(TASK_RUNNING);
304 }
305
void kthread_parkme(void)
307 {
308 __kthread_parkme(to_kthread(current));
309 }
310 EXPORT_SYMBOL_GPL(kthread_parkme);
311
312 /**
313 * kthread_exit - Cause the current kthread return @result to kthread_stop().
314 * @result: The integer value to return to kthread_stop().
315 *
316 * While kthread_exit can be called directly, it exists so that
317 * functions which do some additional work in non-modular code such as
318 * module_put_and_kthread_exit can be implemented.
319 *
320 * Does not return.
321 */
void __noreturn kthread_exit(long result)
323 {
324 struct kthread *kthread = to_kthread(current);
325 kthread->result = result;
326 if (!list_empty(&kthread->affinity_node)) {
327 mutex_lock(&kthread_affinity_lock);
328 list_del(&kthread->affinity_node);
329 mutex_unlock(&kthread_affinity_lock);
330
331 if (kthread->preferred_affinity) {
332 kfree(kthread->preferred_affinity);
333 kthread->preferred_affinity = NULL;
334 }
335 }
336 do_exit(0);
337 }
338 EXPORT_SYMBOL(kthread_exit);
339
340 /**
341 * kthread_complete_and_exit - Exit the current kthread.
342 * @comp: Completion to complete
343 * @code: The integer value to return to kthread_stop().
344 *
345 * If present, complete @comp and then return code to kthread_stop().
346 *
347 * A kernel thread whose module may be removed after the completion of
348 * @comp can use this function to exit safely.
349 *
350 * Does not return.
351 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
353 {
354 if (comp)
355 complete(comp);
356
357 kthread_exit(code);
358 }
359 EXPORT_SYMBOL(kthread_complete_and_exit);
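
/*
 * Illustrative sketch: a thread whose module may be removed as soon as the
 * completion fires. Returning normally after complete() could run module
 * text that is already gone; kthread_complete_and_exit() avoids that.
 * example_exit_fn and do_module_work() are hypothetical names.
 *
 *    static int example_exit_fn(void *arg)
 *    {
 *            struct completion *done = arg;
 *
 *            do_module_work();
 *            kthread_complete_and_exit(done, 0);
 *    }
 */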
360
static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
362 {
363 const struct cpumask *pref;
364
365 guard(rcu)();
366
367 if (kthread->preferred_affinity) {
368 pref = kthread->preferred_affinity;
369 } else {
370 if (kthread->node == NUMA_NO_NODE)
371 pref = housekeeping_cpumask(HK_TYPE_DOMAIN);
372 else
373 pref = cpumask_of_node(kthread->node);
374 }
375
376 cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_DOMAIN));
377 if (cpumask_empty(cpumask))
378 cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
379 }
380
static void kthread_affine_node(void)
382 {
383 struct kthread *kthread = to_kthread(current);
384 cpumask_var_t affinity;
385
386 if (WARN_ON_ONCE(kthread_is_per_cpu(current)))
387 return;
388
389 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
390 WARN_ON_ONCE(1);
391 return;
392 }
393
394 mutex_lock(&kthread_affinity_lock);
395 WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
396 list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
397 /*
398 * The node cpumask is racy when read from kthread() but:
399 * - a racing CPU going down will either fail on the subsequent
400 * call to set_cpus_allowed_ptr() or be migrated to housekeepers
401 * afterwards by the scheduler.
402 * - a racing CPU going up will be handled by kthreads_online_cpu()
403 */
404 kthread_fetch_affinity(kthread, affinity);
405 set_cpus_allowed_ptr(current, affinity);
406 mutex_unlock(&kthread_affinity_lock);
407
408 free_cpumask_var(affinity);
409 }
410
static int kthread(void *_create)
412 {
413 static const struct sched_param param = { .sched_priority = 0 };
414 /* Copy data: it's on kthread's stack */
415 struct kthread_create_info *create = _create;
416 int (*threadfn)(void *data) = create->threadfn;
417 void *data = create->data;
418 struct completion *done;
419 struct kthread *self;
420 int ret;
421
422 self = to_kthread(current);
423
424 /* Release the structure when caller killed by a fatal signal. */
425 done = xchg(&create->done, NULL);
426 if (!done) {
427 kfree(create->full_name);
428 kfree(create);
429 kthread_exit(-EINTR);
430 }
431
432 self->full_name = create->full_name;
433 self->threadfn = threadfn;
434 self->data = data;
435
436 /*
437 * The new thread inherited kthreadd's priority and CPU mask. Reset
438 * back to default in case they have been changed.
439 */
sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
441
442 /* OK, tell user we're spawned, wait for stop or wakeup */
443 __set_current_state(TASK_UNINTERRUPTIBLE);
444 create->result = current;
445 /*
446 * Thread is going to call schedule(), do not preempt it,
447 * or the creator may spend more time in wait_task_inactive().
448 */
449 preempt_disable();
450 complete(done);
451 schedule_preempt_disabled();
452 preempt_enable();
453
454 self->started = 1;
455
456 /*
457 * Apply default node affinity if no call to kthread_bind[_mask]() nor
458 * kthread_affine_preferred() was issued before the first wake-up.
459 */
460 if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
461 kthread_affine_node();
462
463 ret = -EINTR;
464 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
465 cgroup_kthread_ready();
466 __kthread_parkme(self);
467 ret = threadfn(data);
468 }
469 kthread_exit(ret);
470 }
471
/* called from kernel_clone() to get node information for the about-to-be-created task */
int tsk_fork_get_node(struct task_struct *tsk)
474 {
475 #ifdef CONFIG_NUMA
476 if (tsk == kthreadd_task)
477 return tsk->pref_node_fork;
478 #endif
479 return NUMA_NO_NODE;
480 }
481
static void create_kthread(struct kthread_create_info *create)
483 {
484 int pid;
485
486 #ifdef CONFIG_NUMA
487 current->pref_node_fork = create->node;
488 #endif
489 /* We want our own signal handler (we take no signals by default). */
490 pid = kernel_thread(kthread, create, create->full_name,
491 CLONE_FS | CLONE_FILES | SIGCHLD);
492 if (pid < 0) {
493 /* Release the structure when caller killed by a fatal signal. */
494 struct completion *done = xchg(&create->done, NULL);
495
496 kfree(create->full_name);
497 if (!done) {
498 kfree(create);
499 return;
500 }
501 create->result = ERR_PTR(pid);
502 complete(done);
503 }
504 }
505
506 static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
508 void *data, int node,
509 const char namefmt[],
510 va_list args)
511 {
512 DECLARE_COMPLETION_ONSTACK(done);
513 struct task_struct *task;
514 struct kthread_create_info *create = kmalloc_obj(*create, GFP_KERNEL);
515
516 if (!create)
517 return ERR_PTR(-ENOMEM);
518 create->threadfn = threadfn;
519 create->data = data;
520 create->node = node;
521 create->done = &done;
522 create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
523 if (!create->full_name) {
524 task = ERR_PTR(-ENOMEM);
525 goto free_create;
526 }
527
528 spin_lock(&kthread_create_lock);
529 list_add_tail(&create->list, &kthread_create_list);
530 spin_unlock(&kthread_create_lock);
531
532 wake_up_process(kthreadd_task);
533 /*
534 * Wait for completion in killable state, for I might be chosen by
535 * the OOM killer while kthreadd is trying to allocate memory for
536 * new kernel thread.
537 */
538 if (unlikely(wait_for_completion_killable(&done))) {
539 /*
540 * If I was killed by a fatal signal before kthreadd (or new
541 * kernel thread) calls complete(), leave the cleanup of this
542 * structure to that thread.
543 */
544 if (xchg(&create->done, NULL))
545 return ERR_PTR(-EINTR);
546 /*
547 * kthreadd (or new kernel thread) will call complete()
548 * shortly.
549 */
550 wait_for_completion(&done);
551 }
552 task = create->result;
553 free_create:
554 kfree(create);
555 return task;
556 }
557
558 /**
559 * kthread_create_on_node - create a kthread.
560 * @threadfn: the function to run until signal_pending(current).
561 * @data: data ptr for @threadfn.
562 * @node: task and thread structures for the thread are allocated on this node
563 * @namefmt: printf-style name for the thread.
564 *
565 * Description: This helper function creates and names a kernel
566 * thread. The thread will be stopped: use wake_up_process() to start
567 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
568 * is affine to all CPUs.
569 *
 * If the thread is going to be bound to a particular CPU, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
572 * When woken, the thread will run @threadfn() with @data as its
573 * argument. @threadfn() can either return directly if it is a
574 * standalone thread for which no one will call kthread_stop(), or
575 * return when 'kthread_should_stop()' is true (which means
576 * kthread_stop() has been called). The return value should be zero
577 * or a negative error number; it will be passed to kthread_stop().
578 *
579 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
580 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
582 void *data, int node,
583 const char namefmt[],
584 ...)
585 {
586 struct task_struct *task;
587 va_list args;
588
589 va_start(args, namefmt);
590 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
591 va_end(args);
592
593 return task;
594 }
595 EXPORT_SYMBOL(kthread_create_on_node);
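
/*
 * Illustrative sketch: creating, waking and later stopping a kthread with
 * the interface above. example_fn, example_data and id are hypothetical;
 * kthread_run() from <linux/kthread.h> combines the create and the wake-up.
 *
 *    struct task_struct *tsk;
 *
 *    tsk = kthread_create_on_node(example_fn, example_data, NUMA_NO_NODE,
 *                                 "example/%d", id);
 *    if (IS_ERR(tsk))
 *            return PTR_ERR(tsk);
 *    wake_up_process(tsk);
 *
 *    and later, at teardown time:
 *
 *    ret = kthread_stop(tsk);
 */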
596
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
598 {
599 if (!wait_task_inactive(p, state)) {
600 WARN_ON(1);
601 return;
602 }
603
604 scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
605 set_cpus_allowed_force(p, mask);
606
607 /* It's safe because the task is inactive. */
608 p->flags |= PF_NO_SETAFFINITY;
609 }
610
static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
612 {
613 __kthread_bind_mask(p, cpumask_of(cpu), state);
614 }
615
void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
617 {
618 struct kthread *kthread = to_kthread(p);
619 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
620 WARN_ON_ONCE(kthread->started);
621 }
622
623 /**
624 * kthread_bind - bind a just-created kthread to a cpu.
625 * @p: thread created by kthread_create().
626 * @cpu: cpu (might not be online, must be possible) for @k to run on.
627 *
628 * Description: This function is equivalent to set_cpus_allowed(),
629 * except that @cpu doesn't need to be online, and the thread must be
630 * stopped (i.e., just returned from kthread_create()).
631 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
633 {
634 struct kthread *kthread = to_kthread(p);
635 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
636 WARN_ON_ONCE(kthread->started);
637 }
638 EXPORT_SYMBOL(kthread_bind);
639
640 /**
641 * kthread_create_on_cpu - Create a cpu bound kthread
642 * @threadfn: the function to run until signal_pending(current).
643 * @data: data ptr for @threadfn.
644 * @cpu: The cpu on which the thread should be bound,
645 * @namefmt: printf-style name for the thread. Format is restricted
646 * to "name.*%u". Code fills in cpu number.
647 *
648 * Description: This helper function creates and names a kernel thread
649 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
651 void *data, unsigned int cpu,
652 const char *namefmt)
653 {
654 struct task_struct *p;
655
656 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
657 cpu);
658 if (IS_ERR(p))
659 return p;
660 kthread_bind(p, cpu);
/* CPU hotplug needs to bind the thread once again when unparking it. */
662 to_kthread(p)->cpu = cpu;
663 return p;
664 }
665 EXPORT_SYMBOL(kthread_create_on_cpu);
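
/*
 * Illustrative sketch: a CPU-bound thread created with the helper above.
 * The name format carries the CPU number as documented. example_percpu_fn
 * and example_data are hypothetical names.
 *
 *    struct task_struct *tsk;
 *
 *    tsk = kthread_create_on_cpu(example_percpu_fn, example_data, cpu,
 *                                "example/%u");
 *    if (!IS_ERR(tsk))
 *            wake_up_process(tsk);
 */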
666
void kthread_set_per_cpu(struct task_struct *k, int cpu)
668 {
669 struct kthread *kthread = to_kthread(k);
670 if (!kthread)
671 return;
672
673 WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
674
675 if (cpu < 0) {
676 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
677 return;
678 }
679
680 kthread->cpu = cpu;
681 set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
682 }
683
bool kthread_is_per_cpu(struct task_struct *p)
685 {
686 struct kthread *kthread = __to_kthread(p);
687 if (!kthread)
688 return false;
689
690 return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
691 }
692
693 /**
694 * kthread_unpark - unpark a thread created by kthread_create().
695 * @k: thread created by kthread_create().
696 *
697 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
700 */
void kthread_unpark(struct task_struct *k)
702 {
703 struct kthread *kthread = to_kthread(k);
704
705 if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
706 return;
707 /*
708 * Newly created kthread was parked when the CPU was offline.
709 * The binding was lost and we need to set it again.
710 */
711 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
712 __kthread_bind(k, kthread->cpu, TASK_PARKED);
713
714 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
715 /*
716 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
717 */
718 wake_up_state(k, TASK_PARKED);
719 }
720 EXPORT_SYMBOL_GPL(kthread_unpark);
721
722 /**
723 * kthread_park - park a thread created by kthread_create().
724 * @k: thread created by kthread_create().
725 *
726 * Sets kthread_should_park() for @k to return true, wakes it, and
727 * waits for it to return. This can also be called after kthread_create()
728 * instead of calling wake_up_process(): the thread will park without
729 * calling threadfn().
730 *
731 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
732 * If called by the kthread itself just the park bit is set.
733 */
int kthread_park(struct task_struct *k)
735 {
736 struct kthread *kthread = to_kthread(k);
737
738 if (WARN_ON(k->flags & PF_EXITING))
739 return -ENOSYS;
740
741 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
742 return -EBUSY;
743
744 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
745 if (k != current) {
746 wake_up_process(k);
747 /*
748 * Wait for __kthread_parkme() to complete(), this means we
749 * _will_ have TASK_PARKED and are about to call schedule().
750 */
751 wait_for_completion(&kthread->parked);
752 /*
753 * Now wait for that schedule() to complete and the task to
754 * get scheduled out.
755 */
756 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
757 }
758
759 return 0;
760 }
761 EXPORT_SYMBOL_GPL(kthread_park);
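
/*
 * Illustrative sketch: the thread-side counterpart of kthread_park(). A
 * parkable thread checks kthread_should_park() and calls kthread_parkme();
 * the controlling side pairs kthread_park() with kthread_unpark().
 * parkable_fn and do_one_unit_of_work() are hypothetical names.
 *
 *    static int parkable_fn(void *arg)
 *    {
 *            while (!kthread_should_stop()) {
 *                    if (kthread_should_park()) {
 *                            kthread_parkme();
 *                            continue;
 *                    }
 *                    do_one_unit_of_work(arg);
 *                    schedule_timeout_interruptible(HZ);
 *            }
 *            return 0;
 *    }
 */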
762
763 /**
764 * kthread_stop - stop a thread created by kthread_create().
765 * @k: thread created by kthread_create().
766 *
767 * Sets kthread_should_stop() for @k to return true, wakes it, and
768 * waits for it to exit. This can also be called after kthread_create()
769 * instead of calling wake_up_process(): the thread will exit without
770 * calling threadfn().
771 *
772 * If threadfn() may call kthread_exit() itself, the caller must ensure
773 * task_struct can't go away.
774 *
775 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
776 * was never called.
777 */
int kthread_stop(struct task_struct *k)
779 {
780 struct kthread *kthread;
781 int ret;
782
783 trace_sched_kthread_stop(k);
784
785 get_task_struct(k);
786 kthread = to_kthread(k);
787 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
788 kthread_unpark(k);
789 set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
790 wake_up_process(k);
791 wait_for_completion(&kthread->exited);
792 ret = kthread->result;
793 put_task_struct(k);
794
795 trace_sched_kthread_stop_ret(ret);
796 return ret;
797 }
798 EXPORT_SYMBOL(kthread_stop);
799
800 /**
801 * kthread_stop_put - stop a thread and put its task struct
802 * @k: thread created by kthread_create().
803 *
804 * Stops a thread created by kthread_create() and put its task_struct.
805 * Only use when holding an extra task struct reference obtained by
806 * calling get_task_struct().
807 */
int kthread_stop_put(struct task_struct *k)
809 {
810 int ret;
811
812 ret = kthread_stop(k);
813 put_task_struct(k);
814 return ret;
815 }
816 EXPORT_SYMBOL(kthread_stop_put);
817
int kthreadd(void *unused)
819 {
820 static const char comm[TASK_COMM_LEN] = "kthreadd";
821 struct task_struct *tsk = current;
822
823 /* Setup a clean context for our children to inherit. */
824 set_task_comm(tsk, comm);
825 ignore_signals(tsk);
826 set_mems_allowed(node_states[N_MEMORY]);
827
828 current->flags |= PF_NOFREEZE;
829 cgroup_init_kthreadd();
830
831 kthread_affine_node();
832
833 for (;;) {
834 set_current_state(TASK_INTERRUPTIBLE);
835 if (list_empty(&kthread_create_list))
836 schedule();
837 __set_current_state(TASK_RUNNING);
838
839 spin_lock(&kthread_create_lock);
840 while (!list_empty(&kthread_create_list)) {
841 struct kthread_create_info *create;
842
843 create = list_entry(kthread_create_list.next,
844 struct kthread_create_info, list);
845 list_del_init(&create->list);
846 spin_unlock(&kthread_create_lock);
847
848 create_kthread(create);
849
850 spin_lock(&kthread_create_lock);
851 }
852 spin_unlock(&kthread_create_lock);
853 }
854
855 return 0;
856 }
857
858 /**
859 * kthread_affine_preferred - Define a kthread's preferred affinity
860 * @p: thread created by kthread_create().
861 * @mask: preferred mask of CPUs (might not be online, must be possible) for @p
862 * to run on.
863 *
864 * Similar to kthread_bind_mask() except that the affinity is not a requirement
865 * but rather a preference that can be constrained by CPU isolation or CPU hotplug.
866 * Must be called before the first wakeup of the kthread.
867 *
868 * Returns 0 if the affinity has been applied.
869 */
int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
871 {
872 struct kthread *kthread = to_kthread(p);
873 cpumask_var_t affinity;
874 int ret = 0;
875
876 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
877 WARN_ON(1);
878 return -EINVAL;
879 }
880
881 WARN_ON_ONCE(kthread->preferred_affinity);
882
883 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
884 return -ENOMEM;
885
886 kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
887 if (!kthread->preferred_affinity) {
888 ret = -ENOMEM;
889 goto out;
890 }
891
892 mutex_lock(&kthread_affinity_lock);
893 cpumask_copy(kthread->preferred_affinity, mask);
894 WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
895 list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
896 kthread_fetch_affinity(kthread, affinity);
897
898 scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
899 set_cpus_allowed_force(p, affinity);
900
901 mutex_unlock(&kthread_affinity_lock);
902 out:
903 free_cpumask_var(affinity);
904
905 return ret;
906 }
907 EXPORT_SYMBOL_GPL(kthread_affine_preferred);
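
/*
 * Illustrative sketch: applying a preferred affinity before the first
 * wake-up, as required above. example_fn, example_data and pref_mask are
 * hypothetical names; kthread_create() is the NUMA_NO_NODE wrapper from
 * <linux/kthread.h>.
 *
 *    tsk = kthread_create(example_fn, example_data, "example");
 *    if (!IS_ERR(tsk)) {
 *            kthread_affine_preferred(tsk, pref_mask);
 *            wake_up_process(tsk);
 *    }
 */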
908
static int kthreads_update_affinity(bool force)
910 {
911 cpumask_var_t affinity;
912 struct kthread *k;
913 int ret;
914
915 guard(mutex)(&kthread_affinity_lock);
916
917 if (list_empty(&kthread_affinity_list))
918 return 0;
919
920 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
921 return -ENOMEM;
922
923 ret = 0;
924
925 list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
926 if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
927 kthread_is_per_cpu(k->task))) {
928 ret = -EINVAL;
929 continue;
930 }
931
932 /*
933 * Unbound kthreads without preferred affinity are already affine
934 * to housekeeping, whether those CPUs are online or not. So no need
935 * to handle newly online CPUs for them. However housekeeping changes
936 * have to be applied.
937 *
938 * But kthreads with a preferred affinity or node are different:
939 * if none of their preferred CPUs are online and part of
940 * housekeeping at the same time, they must be affine to housekeeping.
 * But as soon as one of their preferred CPUs becomes online, they must
942 * be affine to them.
943 */
944 if (force || k->preferred_affinity || k->node != NUMA_NO_NODE) {
945 kthread_fetch_affinity(k, affinity);
946 set_cpus_allowed_ptr(k->task, affinity);
947 }
948 }
949
950 free_cpumask_var(affinity);
951
952 return ret;
953 }
954
955 /**
956 * kthreads_update_housekeeping - Update kthreads affinity on cpuset change
957 *
958 * When cpuset changes a partition type to/from "isolated" or updates related
959 * cpumasks, propagate the housekeeping cpumask change to preferred kthreads
960 * affinity.
961 *
962 * Returns 0 if successful, -ENOMEM if temporary mask couldn't
963 * be allocated or -EINVAL in case of internal error.
964 */
int kthreads_update_housekeeping(void)
966 {
967 return kthreads_update_affinity(true);
968 }
969
970 /*
971 * Re-affine kthreads according to their preferences
972 * and the newly online CPU. The CPU down part is handled
973 * by select_fallback_rq() which default re-affines to
974 * housekeepers from other nodes in case the preferred
975 * affinity doesn't apply anymore.
976 */
static int kthreads_online_cpu(unsigned int cpu)
978 {
979 return kthreads_update_affinity(false);
980 }
981
static int kthreads_init(void)
983 {
984 return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
985 kthreads_online_cpu, NULL);
986 }
987 early_initcall(kthreads_init);
988
void __kthread_init_worker(struct kthread_worker *worker,
990 const char *name,
991 struct lock_class_key *key)
992 {
993 memset(worker, 0, sizeof(struct kthread_worker));
994 raw_spin_lock_init(&worker->lock);
995 lockdep_set_class_and_name(&worker->lock, key, name);
996 INIT_LIST_HEAD(&worker->work_list);
997 INIT_LIST_HEAD(&worker->delayed_work_list);
998 }
999 EXPORT_SYMBOL_GPL(__kthread_init_worker);
1000
1001 /**
1002 * kthread_worker_fn - kthread function to process kthread_worker
1003 * @worker_ptr: pointer to initialized kthread_worker
1004 *
1005 * This function implements the main cycle of kthread worker. It processes
1006 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
1007 * is empty.
1008 *
 * The works must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after
 * one work finishes and before a new one is started.
1012 *
1013 * Also the works must not be handled by more than one worker at the same time,
1014 * see also kthread_queue_work().
1015 */
int kthread_worker_fn(void *worker_ptr)
1017 {
1018 struct kthread_worker *worker = worker_ptr;
1019 struct kthread_work *work;
1020
1021 /*
1022 * FIXME: Update the check and remove the assignment when all kthread
1023 * worker users are created using kthread_create_worker*() functions.
1024 */
1025 WARN_ON(worker->task && worker->task != current);
1026 worker->task = current;
1027
1028 if (worker->flags & KTW_FREEZABLE)
1029 set_freezable();
1030
1031 repeat:
1032 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
1033
1034 if (kthread_should_stop()) {
1035 __set_current_state(TASK_RUNNING);
1036 raw_spin_lock_irq(&worker->lock);
1037 worker->task = NULL;
1038 raw_spin_unlock_irq(&worker->lock);
1039 return 0;
1040 }
1041
1042 work = NULL;
1043 raw_spin_lock_irq(&worker->lock);
1044 if (!list_empty(&worker->work_list)) {
1045 work = list_first_entry(&worker->work_list,
1046 struct kthread_work, node);
1047 list_del_init(&work->node);
1048 }
1049 worker->current_work = work;
1050 raw_spin_unlock_irq(&worker->lock);
1051
1052 if (work) {
1053 kthread_work_func_t func = work->func;
1054 __set_current_state(TASK_RUNNING);
1055 trace_sched_kthread_work_execute_start(work);
1056 work->func(work);
1057 /*
1058 * Avoid dereferencing work after this point. The trace
1059 * event only cares about the address.
1060 */
1061 trace_sched_kthread_work_execute_end(work, func);
1062 } else if (!freezing(current)) {
1063 schedule();
1064 } else {
1065 /*
1066 * Handle the case where the current remains
1067 * TASK_INTERRUPTIBLE. try_to_freeze() expects
1068 * the current to be TASK_RUNNING.
1069 */
1070 __set_current_state(TASK_RUNNING);
1071 }
1072
1073 try_to_freeze();
1074 cond_resched();
1075 goto repeat;
1076 }
1077 EXPORT_SYMBOL_GPL(kthread_worker_fn);
1078
1079 static __printf(3, 0) struct kthread_worker *
__kthread_create_worker_on_node(unsigned int flags, int node,
1081 const char namefmt[], va_list args)
1082 {
1083 struct kthread_worker *worker;
1084 struct task_struct *task;
1085
1086 worker = kzalloc_obj(*worker, GFP_KERNEL);
1087 if (!worker)
1088 return ERR_PTR(-ENOMEM);
1089
1090 kthread_init_worker(worker);
1091
1092 task = __kthread_create_on_node(kthread_worker_fn, worker,
1093 node, namefmt, args);
1094 if (IS_ERR(task))
1095 goto fail_task;
1096
1097 worker->flags = flags;
1098 worker->task = task;
1099
1100 return worker;
1101
1102 fail_task:
1103 kfree(worker);
1104 return ERR_CAST(task);
1105 }
1106
1107 /**
1108 * kthread_create_worker_on_node - create a kthread worker
1109 * @flags: flags modifying the default behavior of the worker
1110 * @node: task structure for the thread is allocated on this node
1111 * @namefmt: printf-style name for the kthread worker (task).
1112 *
1113 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1114 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1115 * when the caller was killed by a fatal signal.
1116 */
1117 struct kthread_worker *
kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
1119 {
1120 struct kthread_worker *worker;
1121 va_list args;
1122
1123 va_start(args, namefmt);
1124 worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
1125 va_end(args);
1126
1127 return worker;
1128 }
1129 EXPORT_SYMBOL(kthread_create_worker_on_node);
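
/*
 * Illustrative sketch: the usual worker life cycle built on the creation
 * helper above. kthread_create_worker() is the NUMA_NO_NODE wrapper from
 * <linux/kthread.h>; example_work_fn, example_work and handle_one_item()
 * are hypothetical names.
 *
 *    static void example_work_fn(struct kthread_work *work)
 *    {
 *            handle_one_item(work);
 *    }
 *    static DEFINE_KTHREAD_WORK(example_work, example_work_fn);
 *
 *    worker = kthread_create_worker(0, "example_worker");
 *    if (IS_ERR(worker))
 *            return PTR_ERR(worker);
 *    kthread_queue_work(worker, &example_work);
 *    kthread_flush_work(&example_work);
 *    kthread_destroy_worker(worker);
 */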
1130
1131 /**
1132 * kthread_create_worker_on_cpu - create a kthread worker and bind it
1133 * to a given CPU and the associated NUMA node.
1134 * @cpu: CPU number
1135 * @flags: flags modifying the default behavior of the worker
1136 * @namefmt: printf-style name for the thread. Format is restricted
1137 * to "name.*%u". Code fills in cpu number.
1138 *
1139 * Use a valid CPU number if you want to bind the kthread worker
1140 * to the given CPU and the associated NUMA node.
1141 *
1142 * A good practice is to add the cpu number also into the worker name.
1143 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
1144 *
1145 * CPU hotplug:
1146 * The kthread worker API is simple and generic. It just provides a way
1147 * to create, use, and destroy workers.
1148 *
1149 * It is up to the API user how to handle CPU hotplug. They have to decide
1150 * how to handle pending work items, prevent queuing new ones, and
1151 * restore the functionality when the CPU goes off and on. There are a
1152 * few catches:
1153 *
1154 * - CPU affinity gets lost when it is scheduled on an offline CPU.
1155 *
 * - The worker might not exist if the CPU was offline at the time the
 *   user created the workers.
1158 *
1159 * Good practice is to implement two CPU hotplug callbacks and to
1160 * destroy/create the worker when the CPU goes down/up.
1161 *
1162 * Return:
1163 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1164 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1165 * when the caller was killed by a fatal signal.
1166 */
1167 struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
1169 const char namefmt[])
1170 {
1171 struct kthread_worker *worker;
1172
1173 worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
1174 if (!IS_ERR(worker))
1175 kthread_bind(worker->task, cpu);
1176
1177 return worker;
1178 }
1179 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
1180
1181 /*
1182 * Returns true when the work could not be queued at the moment.
1183 * It happens when it is already pending in a worker list
1184 * or when it is being cancelled.
1185 */
static inline bool queuing_blocked(struct kthread_worker *worker,
1187 struct kthread_work *work)
1188 {
1189 lockdep_assert_held(&worker->lock);
1190
1191 return !list_empty(&work->node) || work->canceling;
1192 }
1193
static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
1195 struct kthread_work *work)
1196 {
1197 lockdep_assert_held(&worker->lock);
1198 WARN_ON_ONCE(!list_empty(&work->node));
1199 /* Do not use a work with >1 worker, see kthread_queue_work() */
1200 WARN_ON_ONCE(work->worker && work->worker != worker);
1201 }
1202
1203 /* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
1205 struct kthread_work *work,
1206 struct list_head *pos)
1207 {
1208 kthread_insert_work_sanity_check(worker, work);
1209
1210 trace_sched_kthread_work_queue_work(worker, work);
1211
1212 list_add_tail(&work->node, pos);
1213 work->worker = worker;
1214 if (!worker->current_work && likely(worker->task))
1215 wake_up_process(worker->task);
1216 }
1217
1218 /**
1219 * kthread_queue_work - queue a kthread_work
1220 * @worker: target kthread_worker
1221 * @work: kthread_work to queue
1222 *
1223 * Queue @work to work processor @task for async execution. @task
1224 * must have been created with kthread_create_worker(). Returns %true
1225 * if @work was successfully queued, %false if it was already pending.
1226 *
1227 * Reinitialize the work if it needs to be used by another worker.
1228 * For example, when the worker was stopped and started again.
1229 */
bool kthread_queue_work(struct kthread_worker *worker,
1231 struct kthread_work *work)
1232 {
1233 bool ret = false;
1234 unsigned long flags;
1235
1236 raw_spin_lock_irqsave(&worker->lock, flags);
1237 if (!queuing_blocked(worker, work)) {
1238 kthread_insert_work(worker, work, &worker->work_list);
1239 ret = true;
1240 }
1241 raw_spin_unlock_irqrestore(&worker->lock, flags);
1242 return ret;
1243 }
1244 EXPORT_SYMBOL_GPL(kthread_queue_work);
1245
1246 /**
1247 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1248 * delayed work when the timer expires.
1249 * @t: pointer to the expired timer
1250 *
1251 * The format of the function is defined by struct timer_list.
1252 * It should have been called from irqsafe timer with irq already off.
1253 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
1255 {
1256 struct kthread_delayed_work *dwork = timer_container_of(dwork, t,
1257 timer);
1258 struct kthread_work *work = &dwork->work;
1259 struct kthread_worker *worker = work->worker;
1260 unsigned long flags;
1261
1262 /*
 * This might happen when a pending work is reinitialized.
 * It means that it is being used the wrong way.
1265 */
1266 if (WARN_ON_ONCE(!worker))
1267 return;
1268
1269 raw_spin_lock_irqsave(&worker->lock, flags);
1270 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1271 WARN_ON_ONCE(work->worker != worker);
1272
1273 /* Move the work from worker->delayed_work_list. */
1274 WARN_ON_ONCE(list_empty(&work->node));
1275 list_del_init(&work->node);
1276 if (!work->canceling)
1277 kthread_insert_work(worker, work, &worker->work_list);
1278
1279 raw_spin_unlock_irqrestore(&worker->lock, flags);
1280 }
1281 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1282
static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1284 struct kthread_delayed_work *dwork,
1285 unsigned long delay)
1286 {
1287 struct timer_list *timer = &dwork->timer;
1288 struct kthread_work *work = &dwork->work;
1289
1290 WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1291
1292 /*
1293 * If @delay is 0, queue @dwork->work immediately. This is for
1294 * both optimization and correctness. The earliest @timer can
1295 * expire is on the closest next tick and delayed_work users depend
1296 * on that there's no such delay when @delay is 0.
1297 */
1298 if (!delay) {
1299 kthread_insert_work(worker, work, &worker->work_list);
1300 return;
1301 }
1302
1303 /* Be paranoid and try to detect possible races already now. */
1304 kthread_insert_work_sanity_check(worker, work);
1305
1306 list_add(&work->node, &worker->delayed_work_list);
1307 work->worker = worker;
1308 timer->expires = jiffies + delay;
1309 add_timer(timer);
1310 }
1311
1312 /**
1313 * kthread_queue_delayed_work - queue the associated kthread work
1314 * after a delay.
1315 * @worker: target kthread_worker
1316 * @dwork: kthread_delayed_work to queue
1317 * @delay: number of jiffies to wait before queuing
1318 *
1319 * If the work has not been pending it starts a timer that will queue
1320 * the work after the given @delay. If @delay is zero, it queues the
1321 * work immediately.
1322 *
1323 * Return: %false if the @work has already been pending. It means that
1324 * either the timer was running or the work was queued. It returns %true
1325 * otherwise.
1326 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
1328 struct kthread_delayed_work *dwork,
1329 unsigned long delay)
1330 {
1331 struct kthread_work *work = &dwork->work;
1332 unsigned long flags;
1333 bool ret = false;
1334
1335 raw_spin_lock_irqsave(&worker->lock, flags);
1336
1337 if (!queuing_blocked(worker, work)) {
1338 __kthread_queue_delayed_work(worker, dwork, delay);
1339 ret = true;
1340 }
1341
1342 raw_spin_unlock_irqrestore(&worker->lock, flags);
1343 return ret;
1344 }
1345 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
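
/*
 * Illustrative sketch: arming a delayed work on a worker. The timer must
 * have been set up with kthread_init_delayed_work() (or
 * KTHREAD_DELAYED_WORK_INIT) so that it points at
 * kthread_delayed_work_timer_fn(), as checked in
 * __kthread_queue_delayed_work(). example_dwork and example_dwork_fn are
 * hypothetical names.
 *
 *    static struct kthread_delayed_work example_dwork;
 *
 *    kthread_init_delayed_work(&example_dwork, example_dwork_fn);
 *    kthread_queue_delayed_work(worker, &example_dwork,
 *                               msecs_to_jiffies(100));
 *
 *    and on teardown:
 *
 *    kthread_cancel_delayed_work_sync(&example_dwork);
 */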
1346
1347 struct kthread_flush_work {
1348 struct kthread_work work;
1349 struct completion done;
1350 };
1351
static void kthread_flush_work_fn(struct kthread_work *work)
1353 {
1354 struct kthread_flush_work *fwork =
1355 container_of(work, struct kthread_flush_work, work);
1356 complete(&fwork->done);
1357 }
1358
1359 /**
1360 * kthread_flush_work - flush a kthread_work
1361 * @work: work to flush
1362 *
1363 * If @work is queued or executing, wait for it to finish execution.
1364 */
void kthread_flush_work(struct kthread_work *work)
1366 {
1367 struct kthread_flush_work fwork = {
1368 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1369 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1370 };
1371 struct kthread_worker *worker;
1372 bool noop = false;
1373
1374 worker = work->worker;
1375 if (!worker)
1376 return;
1377
1378 raw_spin_lock_irq(&worker->lock);
1379 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1380 WARN_ON_ONCE(work->worker != worker);
1381
1382 if (!list_empty(&work->node))
1383 kthread_insert_work(worker, &fwork.work, work->node.next);
1384 else if (worker->current_work == work)
1385 kthread_insert_work(worker, &fwork.work,
1386 worker->work_list.next);
1387 else
1388 noop = true;
1389
1390 raw_spin_unlock_irq(&worker->lock);
1391
1392 if (!noop)
1393 wait_for_completion(&fwork.done);
1394 }
1395 EXPORT_SYMBOL_GPL(kthread_flush_work);
1396
1397 /*
1398 * Make sure that the timer is neither set nor running and could
1399 * not manipulate the work list_head any longer.
1400 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
1403 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1405 unsigned long *flags)
1406 {
1407 struct kthread_delayed_work *dwork =
1408 container_of(work, struct kthread_delayed_work, work);
1409 struct kthread_worker *worker = work->worker;
1410
1411 /*
1412 * timer_delete_sync() must be called to make sure that the timer
 * callback is not running. The lock must be temporarily released
1414 * to avoid a deadlock with the callback. In the meantime,
1415 * any queuing is blocked by setting the canceling counter.
1416 */
1417 work->canceling++;
1418 raw_spin_unlock_irqrestore(&worker->lock, *flags);
1419 timer_delete_sync(&dwork->timer);
1420 raw_spin_lock_irqsave(&worker->lock, *flags);
1421 work->canceling--;
1422 }
1423
1424 /*
1425 * This function removes the work from the worker queue.
1426 *
1427 * It is called under worker->lock. The caller must make sure that
1428 * the timer used by delayed work is not running, e.g. by calling
1429 * kthread_cancel_delayed_work_timer().
1430 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
1433 *
1434 * Return: %true if @work was pending and successfully canceled,
1435 * %false if @work was not pending
1436 */
static bool __kthread_cancel_work(struct kthread_work *work)
1438 {
1439 /*
1440 * Try to remove the work from a worker list. It might either
1441 * be from worker->work_list or from worker->delayed_work_list.
1442 */
1443 if (!list_empty(&work->node)) {
1444 list_del_init(&work->node);
1445 return true;
1446 }
1447
1448 return false;
1449 }
1450
1451 /**
1452 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1453 * @worker: kthread worker to use
1454 * @dwork: kthread delayed work to queue
1455 * @delay: number of jiffies to wait before queuing
1456 *
1457 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1458 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1459 * @work is guaranteed to be queued immediately.
1460 *
1461 * Return: %false if @dwork was idle and queued, %true otherwise.
1462 *
1463 * A special case is when the work is being canceled in parallel.
1464 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1465 * or yet another kthread_mod_delayed_work() call. We let the other command
1466 * win and return %true here. The return value can be used for reference
1467 * counting and the number of queued works stays the same. Anyway, the caller
1468 * is supposed to synchronize these operations a reasonable way.
1469 *
1470 * This function is safe to call from any context including IRQ handler.
1471 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1472 * for details.
1473 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
1475 struct kthread_delayed_work *dwork,
1476 unsigned long delay)
1477 {
1478 struct kthread_work *work = &dwork->work;
1479 unsigned long flags;
1480 int ret;
1481
1482 raw_spin_lock_irqsave(&worker->lock, flags);
1483
1484 /* Do not bother with canceling when never queued. */
1485 if (!work->worker) {
1486 ret = false;
1487 goto fast_queue;
1488 }
1489
1490 /* Work must not be used with >1 worker, see kthread_queue_work() */
1491 WARN_ON_ONCE(work->worker != worker);
1492
1493 /*
 * Temporarily cancel the work but do not fight with another command
1495 * that is canceling the work as well.
1496 *
1497 * It is a bit tricky because of possible races with another
1498 * mod_delayed_work() and cancel_delayed_work() callers.
1499 *
1500 * The timer must be canceled first because worker->lock is released
1501 * when doing so. But the work can be removed from the queue (list)
1502 * only when it can be queued again so that the return value can
1503 * be used for reference counting.
1504 */
1505 kthread_cancel_delayed_work_timer(work, &flags);
1506 if (work->canceling) {
1507 /* The number of works in the queue does not change. */
1508 ret = true;
1509 goto out;
1510 }
1511 ret = __kthread_cancel_work(work);
1512
1513 fast_queue:
1514 __kthread_queue_delayed_work(worker, dwork, delay);
1515 out:
1516 raw_spin_unlock_irqrestore(&worker->lock, flags);
1517 return ret;
1518 }
1519 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1520
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1522 {
1523 struct kthread_worker *worker = work->worker;
1524 unsigned long flags;
1525 int ret = false;
1526
1527 if (!worker)
1528 goto out;
1529
1530 raw_spin_lock_irqsave(&worker->lock, flags);
1531 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1532 WARN_ON_ONCE(work->worker != worker);
1533
1534 if (is_dwork)
1535 kthread_cancel_delayed_work_timer(work, &flags);
1536
1537 ret = __kthread_cancel_work(work);
1538
1539 if (worker->current_work != work)
1540 goto out_fast;
1541
1542 /*
1543 * The work is in progress and we need to wait with the lock released.
1544 * In the meantime, block any queuing by setting the canceling counter.
1545 */
1546 work->canceling++;
1547 raw_spin_unlock_irqrestore(&worker->lock, flags);
1548 kthread_flush_work(work);
1549 raw_spin_lock_irqsave(&worker->lock, flags);
1550 work->canceling--;
1551
1552 out_fast:
1553 raw_spin_unlock_irqrestore(&worker->lock, flags);
1554 out:
1555 return ret;
1556 }
1557
1558 /**
1559 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1560 * @work: the kthread work to cancel
1561 *
1562 * Cancel @work and wait for its execution to finish. This function
1563 * can be used even if the work re-queues itself. On return from this
1564 * function, @work is guaranteed to be not pending or executing on any CPU.
1565 *
1566 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1567 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1568 *
1569 * The caller must ensure that the worker on which @work was last
1570 * queued can't be destroyed before this function returns.
1571 *
1572 * Return: %true if @work was pending, %false otherwise.
1573 */
bool kthread_cancel_work_sync(struct kthread_work *work)
1575 {
1576 return __kthread_cancel_work_sync(work, false);
1577 }
1578 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1579
1580 /**
1581 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1582 * wait for it to finish.
1583 * @dwork: the kthread delayed work to cancel
1584 *
1585 * This is kthread_cancel_work_sync() for delayed works.
1586 *
1587 * Return: %true if @dwork was pending, %false otherwise.
1588 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1590 {
1591 return __kthread_cancel_work_sync(&dwork->work, true);
1592 }
1593 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1594
1595 /**
1596 * kthread_flush_worker - flush all current works on a kthread_worker
1597 * @worker: worker to flush
1598 *
1599 * Wait until all currently executing or pending works on @worker are
1600 * finished.
1601 */
void kthread_flush_worker(struct kthread_worker *worker)
1603 {
1604 struct kthread_flush_work fwork = {
1605 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1606 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1607 };
1608
1609 kthread_queue_work(worker, &fwork.work);
1610 wait_for_completion(&fwork.done);
1611 }
1612 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1613
1614 /**
1615 * kthread_destroy_worker - destroy a kthread worker
1616 * @worker: worker to be destroyed
1617 *
1618 * Flush and destroy @worker. The simple flush is enough because the kthread
1619 * worker API is used only in trivial scenarios. There are no multi-step state
1620 * machines needed.
1621 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should queue or cancel all delayed work items before invoking
 * this function.
1625 */
void kthread_destroy_worker(struct kthread_worker *worker)
1627 {
1628 struct task_struct *task;
1629
1630 task = worker->task;
1631 if (WARN_ON(!task))
1632 return;
1633
1634 kthread_flush_worker(worker);
1635 kthread_stop(task);
1636 WARN_ON(!list_empty(&worker->delayed_work_list));
1637 WARN_ON(!list_empty(&worker->work_list));
1638 kfree(worker);
1639 }
1640 EXPORT_SYMBOL(kthread_destroy_worker);
1641
1642 /**
1643 * kthread_use_mm - make the calling kthread operate on an address space
1644 * @mm: address space to operate on
1645 */
void kthread_use_mm(struct mm_struct *mm)
1647 {
1648 struct mm_struct *active_mm;
1649 struct task_struct *tsk = current;
1650
1651 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1652 WARN_ON_ONCE(tsk->mm);
1653 WARN_ON_ONCE(!mm->user_ns);
1654
1655 /*
1656 * It is possible for mm to be the same as tsk->active_mm, but
1657 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1658 * because these references are not equivalent.
1659 */
1660 mmgrab(mm);
1661
1662 task_lock(tsk);
1663 /* Hold off tlb flush IPIs while switching mm's */
1664 local_irq_disable();
1665 active_mm = tsk->active_mm;
1666 tsk->active_mm = mm;
1667 tsk->mm = mm;
1668 membarrier_update_current_mm(mm);
1669 switch_mm_irqs_off(active_mm, mm, tsk);
1670 local_irq_enable();
1671 task_unlock(tsk);
1672 #ifdef finish_arch_post_lock_switch
1673 finish_arch_post_lock_switch();
1674 #endif
1675
1676 /*
1677 * When a kthread starts operating on an address space, the loop
1678 * in membarrier_{private,global}_expedited() may not observe
1679 * that tsk->mm, and not issue an IPI. Membarrier requires a
1680 * memory barrier after storing to tsk->mm, before accessing
1681 * user-space memory. A full memory barrier for membarrier
1682 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1683 * mmdrop_lazy_tlb().
1684 */
1685 mmdrop_lazy_tlb(active_mm);
1686 }
1687 EXPORT_SYMBOL_GPL(kthread_use_mm);
1688
1689 /**
1690 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1691 * @mm: address space to operate on
1692 */
void kthread_unuse_mm(struct mm_struct *mm)
1694 {
1695 struct task_struct *tsk = current;
1696
1697 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1698 WARN_ON_ONCE(!tsk->mm);
1699
1700 task_lock(tsk);
1701 /*
1702 * When a kthread stops operating on an address space, the loop
1703 * in membarrier_{private,global}_expedited() may not observe
1704 * that tsk->mm, and not issue an IPI. Membarrier requires a
1705 * memory barrier after accessing user-space memory, before
1706 * clearing tsk->mm.
1707 */
1708 smp_mb__after_spinlock();
1709 local_irq_disable();
1710 tsk->mm = NULL;
1711 membarrier_update_current_mm(NULL);
1712 mmgrab_lazy_tlb(mm);
1713 /* active_mm is still 'mm' */
1714 enter_lazy_tlb(mm, tsk);
1715 local_irq_enable();
1716 task_unlock(tsk);
1717
1718 mmdrop(mm);
1719 }
1720 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
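
/*
 * Illustrative sketch: a kthread temporarily adopting a user address space
 * to perform uaccess on its behalf, bracketing the access with the pair
 * above. The reference handling follows the usual mmget_not_zero()/mmput()
 * pattern; do_uaccess_work() is a hypothetical helper that may use
 * copy_to_user()/copy_from_user().
 *
 *    if (mmget_not_zero(mm)) {
 *            kthread_use_mm(mm);
 *            do_uaccess_work(mm);
 *            kthread_unuse_mm(mm);
 *            mmput(mm);
 *    }
 */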
1721
1722 #ifdef CONFIG_BLK_CGROUP
1723 /**
1724 * kthread_associate_blkcg - associate blkcg to current kthread
1725 * @css: the cgroup info
1726 *
1727 * Current thread must be a kthread. The thread is running jobs on behalf of
1728 * other threads. In some cases, we expect the jobs attach cgroup info of
1729 * original threads instead of that of current thread. This function stores
1730 * original thread's cgroup info in current kthread context for later
1731 * retrieval.
1732 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1734 {
1735 struct kthread *kthread;
1736
1737 if (!(current->flags & PF_KTHREAD))
1738 return;
1739 kthread = to_kthread(current);
1740 if (!kthread)
1741 return;
1742
1743 if (kthread->blkcg_css) {
1744 css_put(kthread->blkcg_css);
1745 kthread->blkcg_css = NULL;
1746 }
1747 if (css) {
1748 css_get(css);
1749 kthread->blkcg_css = css;
1750 }
1751 }
1752 EXPORT_SYMBOL(kthread_associate_blkcg);
1753
1754 /**
1755 * kthread_blkcg - get associated blkcg css of current kthread
1756 *
1757 * Current thread must be a kthread.
1758 */
struct cgroup_subsys_state *kthread_blkcg(void)
1760 {
1761 struct kthread *kthread;
1762
1763 if (current->flags & PF_KTHREAD) {
1764 kthread = to_kthread(current);
1765 if (kthread)
1766 return kthread->blkcg_css;
1767 }
1768 return NULL;
1769 }
1770 #endif
1771