// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

static LIST_HEAD(kthread_affinity_list);
static DEFINE_MUTEX(kthread_affinity_lock);

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	unsigned int node;
	int started;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
	struct task_struct *task;
	struct list_head affinity_node;
	struct cpumask *preferred_affinity;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * When "(p->flags & PF_KTHREAD)" is set the task is a kthread and will
 * always remain a kthread. For kthreads p->worker_private always
 * points to a struct kthread. For tasks that are not kthreads
 * p->worker_private is used to point to other things.
 *
 * Return NULL for any task that is not a kthread.
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		strscpy(buf, tsk->comm, buf_size);
		return;
	}

	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	INIT_LIST_HEAD(&kthread->affinity_node);
	p->vfork_done = &kthread->exited;

	kthread->task = p;
	kthread->node = tsk_fork_get_node(current);
	p->worker_private = kthread;
	return true;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

static bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

bool kthread_should_stop_or_park(void)
{
	struct kthread *kthread = __to_kthread(current);

	if (!kthread)
		return false;

	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}
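
/*
 * Illustrative sketch (not part of the kernel sources): a typical threadfn
 * honouring the stop and park requests documented above. The names
 * example_thread_fn() and example_do_work() are hypothetical.
 *
 *	static int example_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */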
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	if (!list_empty(&kthread->affinity_node)) {
		mutex_lock(&kthread_affinity_lock);
		list_del(&kthread->affinity_node);
		mutex_unlock(&kthread_affinity_lock);

		if (kthread->preferred_affinity) {
			kfree(kthread->preferred_affinity);
			kthread->preferred_affinity = NULL;
		}
	}
	do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);
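
/*
 * Illustrative sketch (not part of the kernel sources): a module-owned
 * kthread signalling its creator before exiting, so the module can be
 * unloaded safely once the completion fires. example_module_thread,
 * example_do_setup() and example_done are hypothetical.
 *
 *	static int example_module_thread(void *data)
 *	{
 *		long err = example_do_setup(data);
 *
 *		kthread_complete_and_exit(&example_done, err);
 *	}
 */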
static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
{
	const struct cpumask *pref;

	guard(rcu)();

	if (kthread->preferred_affinity) {
		pref = kthread->preferred_affinity;
	} else {
		if (kthread->node == NUMA_NO_NODE)
			pref = housekeeping_cpumask(HK_TYPE_DOMAIN);
		else
			pref = cpumask_of_node(kthread->node);
	}

	cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_DOMAIN));
	if (cpumask_empty(cpumask))
		cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
}

static void kthread_affine_node(void)
{
	struct kthread *kthread = to_kthread(current);
	cpumask_var_t affinity;

	if (WARN_ON_ONCE(kthread_is_per_cpu(current)))
		return;

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
		WARN_ON_ONCE(1);
		return;
	}

	mutex_lock(&kthread_affinity_lock);
	WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
	list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
	/*
	 * The node cpumask is racy when read from kthread() but:
	 * - a racing CPU going down will either fail on the subsequent
	 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
	 *   afterwards by the scheduler.
	 * - a racing CPU going up will be handled by kthreads_online_cpu()
	 */
	kthread_fetch_affinity(kthread, affinity);
	set_cpus_allowed_ptr(current, affinity);
	mutex_unlock(&kthread_affinity_lock);

	free_cpumask_var(affinity);
}

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when caller killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	self->started = 1;

	/*
	 * Apply default node affinity if no call to kthread_bind[_mask]() nor
	 * kthread_affine_preferred() was issued before the first wake-up.
	 */
	if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
		kthread_affine_node();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
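
/*
 * Illustrative sketch (not part of the kernel sources): the usual
 * create/wake/stop sequence built on the helper above. The names
 * example_thread_fn and example_dev are hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(example_thread_fn, example_dev,
 *				     NUMA_NO_NODE, "example/%u", example_dev->id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *
 *	// later, on teardown; returns example_thread_fn()'s result
 *	ret = kthread_stop(tsk);
 */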
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
		set_cpus_allowed_force(p, mask);

	/* It's safe because the task is inactive. */
	p->flags |= PF_NO_SETAFFINITY;
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	struct kthread *kthread = to_kthread(p);
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
	WARN_ON_ONCE(kthread->started);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	struct kthread *kthread = to_kthread(p);
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
	WARN_ON_ONCE(kthread->started);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
		return;
	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
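
/*
 * Illustrative sketch (not part of the kernel sources): parking a per-cpu
 * thread across CPU hotplug, in the style of smpboot users. The name
 * example_cpu_thread is hypothetical.
 *
 *	// CPU going down: thread stops in __kthread_parkme()
 *	kthread_park(example_cpu_thread);
 *
 *	// CPU back up: thread is rebound to its CPU and resumes threadfn()
 *	kthread_unpark(example_cpu_thread);
 */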
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);

int kthreadd(void *unused)
{
	static const char comm[TASK_COMM_LEN] = "kthreadd";
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, comm);
	ignore_signals(tsk);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	kthread_affine_node();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

/**
 * kthread_affine_preferred - Define a kthread's preferred affinity
 * @p: thread created by kthread_create().
 * @mask: preferred mask of CPUs (might not be online, must be possible) for @p
 *        to run on.
 *
 * Similar to kthread_bind_mask() except that the affinity is not a requirement
 * but rather a preference that can be constrained by CPU isolation or CPU hotplug.
 * Must be called before the first wakeup of the kthread.
 *
 * Returns 0 if the affinity has been applied.
 */
int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
{
	struct kthread *kthread = to_kthread(p);
	cpumask_var_t affinity;
	int ret = 0;

	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON_ONCE(kthread->preferred_affinity);

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
		return -ENOMEM;

	kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
	if (!kthread->preferred_affinity) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&kthread_affinity_lock);
	cpumask_copy(kthread->preferred_affinity, mask);
	WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
	list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
	kthread_fetch_affinity(kthread, affinity);

	scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
		set_cpus_allowed_force(p, affinity);

	mutex_unlock(&kthread_affinity_lock);
out:
	free_cpumask_var(affinity);

	return ret;
}
EXPORT_SYMBOL_GPL(kthread_affine_preferred);

static int kthreads_update_affinity(bool force)
{
	cpumask_var_t affinity;
	struct kthread *k;
	int ret;

	guard(mutex)(&kthread_affinity_lock);

	if (list_empty(&kthread_affinity_list))
		return 0;

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
		return -ENOMEM;

	ret = 0;

	list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
		if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
				 kthread_is_per_cpu(k->task))) {
			ret = -EINVAL;
			continue;
		}

		/*
		 * Unbound kthreads without preferred affinity are already affine
		 * to housekeeping, whether those CPUs are online or not. So no need
		 * to handle newly online CPUs for them. However housekeeping changes
		 * have to be applied.
		 *
		 * But kthreads with a preferred affinity or node are different:
		 * if none of their preferred CPUs are online and part of
		 * housekeeping at the same time, they must be affine to housekeeping.
		 * But as soon as one of their preferred CPUs becomes online, they must
		 * be affine to them.
		 */
		if (force || k->preferred_affinity || k->node != NUMA_NO_NODE) {
			kthread_fetch_affinity(k, affinity);
			set_cpus_allowed_ptr(k->task, affinity);
		}
	}

	free_cpumask_var(affinity);

	return ret;
}

/**
 * kthreads_update_housekeeping - Update kthreads affinity on cpuset change
 *
 * When cpuset changes a partition type to/from "isolated" or updates related
 * cpumasks, propagate the housekeeping cpumask change to preferred kthreads
 * affinity.
 *
 * Returns 0 if successful, -ENOMEM if a temporary mask couldn't
 * be allocated, or -EINVAL in case of an internal error.
 */
int kthreads_update_housekeeping(void)
{
	return kthreads_update_affinity(true);
}
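
/*
 * Illustrative sketch (not part of the kernel sources): giving a freshly
 * created kthread a preferred, but not mandatory, CPU mask before its
 * first wake-up. example_thread_fn and example_mask are hypothetical.
 *
 *	tsk = kthread_create(example_thread_fn, NULL, "example");
 *	if (!IS_ERR(tsk)) {
 *		kthread_affine_preferred(tsk, example_mask);
 *		wake_up_process(tsk);
 *	}
 */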
/*
 * Re-affine kthreads according to their preferences
 * and the newly online CPU. The CPU down part is handled
 * by select_fallback_rq() which default re-affines to
 * housekeepers from other nodes in case the preferred
 * affinity doesn't apply anymore.
 */
static int kthreads_online_cpu(unsigned int cpu)
{
	return kthreads_update_affinity(false);
}

static int kthreads_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
				 kthreads_online_cpu, NULL);
}
early_initcall(kthreads_init);

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is a safe point for freezing after one work finishes
 * and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
		 * the current to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker_on_node(unsigned int flags, int node,
				const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	worker->flags = flags;
	worker->task = task;

	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker_on_node - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @node: task structure for the thread is allocated on this node
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_node);
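
/*
 * Illustrative sketch (not part of the kernel sources): the basic
 * kthread_worker life cycle. example_work_fn and example_work are
 * hypothetical; kthread_create_worker() wraps the _on_node() variant.
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work work;
 *
 *	worker = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *
 *	kthread_init_work(&work, example_work_fn);
 *	kthread_queue_work(worker, &work);
 *	kthread_flush_work(&work);
 *
 *	kthread_destroy_worker(worker);
 */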
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%u").
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when it is scheduled on an offline CPU.
 *
 *    - The worker might not exist when the CPU was off when the user
 *	created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[])
{
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
	if (!IS_ERR(worker))
		kthread_bind(worker->task, cpu);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution. @task
 * must have been created with kthread_create_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = timer_container_of(dwork, t,
								timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
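
/*
 * Illustrative sketch (not part of the kernel sources): arming and re-arming
 * a delayed work on a previously created worker. example_timeout_fn and
 * example_worker are hypothetical.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, example_timeout_fn);
 *	kthread_queue_delayed_work(example_worker, &dwork, 2 * HZ);
 *
 *	// push the deadline out (or queue immediately if it already fired)
 *	kthread_mod_delayed_work(example_worker, &dwork, 5 * HZ);
 */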
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * timer_delete_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	timer_delete_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios. There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should be responsible for queuing or canceling all delayed work
 * items before invoking this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->delayed_work_list));
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
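
/*
 * Illustrative sketch (not part of the kernel sources): tearing down a
 * worker that also uses delayed work. Delayed items must be dealt with
 * before kthread_destroy_worker(), as noted above. example_dwork and
 * example_worker are hypothetical.
 *
 *	kthread_cancel_delayed_work_sync(&example_dwork);
 *	kthread_destroy_worker(example_worker);
 */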
/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);
	WARN_ON_ONCE(!mm->user_ns);

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif