// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
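
/*
 * Illustrative sketch (not part of this file): the typical shape of a
 * thread function paired with kthread_run()/kthread_stop().  The names
 * my_thread_fn and my_data are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			// do one unit of work on 'data'
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	// passed back to kthread_stop()
 *	}
 *
 *	struct task_struct *t = kthread_run(my_thread_fn, my_data, "my_thread");
 *	...
 *	int ret = kthread_stop(t);
 */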

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return to_kthread(task)->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
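
/*
 * Illustrative sketch (not part of this file): a thread function that
 * honours both park and stop requests, in the style of per-CPU helper
 * threads.  my_percpu_fn is a hypothetical name.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			// do one unit of work, then sleep or yield
 *		}
 *		return 0;
 *	}
 */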

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from kernel_clone() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * a new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
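
/*
 * Illustrative sketch (not part of this file): creating a thread with an
 * explicit NUMA hint and starting it.  Callers that do not care about the
 * node typically use kthread_create()/kthread_run(), which pass
 * NUMA_NO_NODE.  my_thread_fn, my_data and id are hypothetical.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				   "mythread/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);	// the new thread is stopped until woken
 */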

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  Format is restricted
 *	     to "name.*%u".  Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
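
/*
 * Illustrative sketch (not part of this file): creating a CPU-bound
 * thread.  The name format must contain a single %u, which this helper
 * fills with the CPU number.  my_percpu_fn and my_data are hypothetical.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_cpu(my_percpu_fn, my_data, cpu, "myhelper/%u");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 */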

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it up.
 * If the thread is marked percpu then it's bound to the cpu again
 * before the thread function resumes.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
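
/*
 * Illustrative sketch (not part of this file): a controller parking a
 * thread across a critical section (for example while its CPU goes down)
 * and later stopping it.  't' stands for a task_struct returned by one
 * of the kthread_create*() helpers above.
 *
 *	if (!kthread_park(t)) {
 *		// the thread is now off the runqueue in TASK_PARKED
 *		...
 *		kthread_unpark(t);	// per-cpu threads are rebound here
 *	}
 *	...
 *	ret = kthread_stop(t);	// unparks if needed, returns threadfn()'s value
 */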

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop().  It sleeps when the
 * queue is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish.  There is a safe point for freezing after
 * one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes offline and comes back
 * online. There are a few catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline
 *	CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *	created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution by @worker's task.  The worker must
 * have been created with kthread_create_worker().  Returns %true if
 * @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
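
/*
 * Illustrative sketch (not part of this file): basic kthread_worker
 * usage with a dedicated worker thread.  my_work_fn and "my_worker" are
 * hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker's kthread context
 *	}
 *
 *	struct kthread_worker *w;
 *	struct kthread_work work;
 *
 *	w = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 *	kthread_init_work(&work, my_work_fn);
 *	kthread_queue_work(w, &work);
 *	kthread_flush_work(&work);	// wait until my_work_fn() has finished
 */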

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from an irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_FUNCTION_MISMATCH(timer->function,
				  kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending, meaning that either
 * its timer was running or the work was queued.  %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
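
/*
 * Illustrative sketch (not part of this file): delayed work on a kthread
 * worker.  'w' is a worker created as in the example above; my_work_fn
 * is hypothetical.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, my_work_fn);
 *	kthread_queue_delayed_work(w, &dwork, msecs_to_jiffies(100));
 *	...
 *	kthread_cancel_delayed_work_sync(&dwork);	// on teardown
 */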

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
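
/*
 * Illustrative sketch (not part of this file): the "debounce" pattern,
 * where each event pushes the delayed work further into the future, plus
 * the matching synchronous cancel on teardown.  'w' and 'dwork' are as in
 * the delayed-work example above.
 *
 *	// on every event: (re)arm the timer, coalescing bursts of events
 *	kthread_mod_delayed_work(w, &dwork, msecs_to_jiffies(50));
 *	...
 *	// on teardown: make sure neither the timer nor the callback runs
 *	kthread_cancel_delayed_work_sync(&dwork);
 */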

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory.  A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
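
/*
 * Illustrative sketch (not part of this file): a kthread temporarily
 * adopting a user address space to perform uaccess on its behalf, as
 * done by callers such as vhost.  The caller is expected to hold its own
 * reference on 'mm'; buf, uptr and len are hypothetical.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(buf, uptr, len))
 *		err = -EFAULT;
 *	kthread_unuse_mm(mm);
 */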

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif
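
/*
 * Illustrative sketch (not part of this file): a kthread doing I/O on
 * behalf of another task charges that task's block cgroup, in the style
 * of the loop driver.  submitter_css is a hypothetical css obtained from
 * the originating task.
 *
 *	kthread_associate_blkcg(submitter_css);
 *	// submit bios; block layer accounting sees submitter_css
 *	kthread_associate_blkcg(NULL);	// drop the association
 */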