/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
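
/*
 * Illustrative sketch (not part of this file): the canonical main loop
 * of a kthread that honors both kthread_stop() and kthread_park().
 * example_threadfn and example_do_unit_of_work are made-up names.
 */
#if 0
static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sit in TASK_PARKED until kthread_unpark(). */
			kthread_parkme();
			continue;
		}
		example_do_unit_of_work(data);
	}
	/* This value is returned from kthread_stop(). */
	return 0;
}
#endif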

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		complete_all(&self->parked);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
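
/*
 * Illustrative sketch (not part of this file): the @data pointer passed
 * at creation time can later be recovered, from any context holding a
 * valid task reference, via kthread_data().  struct example_ctx,
 * example_task and example_poke are made-up names.
 */
#if 0
struct example_ctx {
	atomic_t kicks;
};

static struct task_struct *example_task;

static void example_poke(void)
{
	struct example_ctx *ctx = kthread_data(example_task);

	atomic_inc(&ctx->kicks);
	wake_up_process(example_task);
}
#endif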

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If the user was SIGKILLed, release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If the user was SIGKILLed, release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for the completion in killable state, for we might be chosen
	 * by the OOM killer while kthreadd is trying to allocate memory for
	 * a new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If we were SIGKILLed before kthreadd (or the new kernel
		 * thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or the new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
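
/*
 * Illustrative sketch (not part of this file): the usual creation
 * pattern built on the machinery above.  kthread_create() is the
 * NUMA_NO_NODE wrapper around kthread_create_on_node(); the
 * kthread_run() macro additionally wakes the thread.  example_threadfn
 * is a made-up name.
 */
#if 0
static int example_start(void *data)
{
	struct task_struct *task;

	task = kthread_create(example_threadfn, data, "example/%d", 0);
	if (IS_ERR(task))
		return PTR_ERR(task);	/* -ENOMEM or -EINTR */

	wake_up_process(task);		/* or use kthread_run() directly */
	return 0;
}
#endif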

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy
 * and is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
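
/*
 * Illustrative sketch (not part of this file): binding must happen
 * while the thread is still stopped, i.e. between kthread_create()
 * and the first wake_up_process().  example_threadfn is a made-up name.
 */
#if 0
static struct task_struct *example_create_bound(unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(example_threadfn, NULL, "example/%u", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);	/* cpu must be a possible CPU */
		wake_up_process(task);
	}
	return task;
}
#endif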

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: the cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  Format is restricted
 *	     to "name.*%u".  The code fills in the cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.  If
 * the thread is marked percpu, then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * A newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	reinit_completion(&kthread->parked);
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(); this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
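
/*
 * Illustrative sketch (not part of this file): a hotplug-style
 * park/unpark cycle as seen from the controlling side.  The thread
 * itself must call kthread_parkme() when kthread_should_park() is
 * true (see the loop sketch near kthread_should_park() above).
 * example_quiesce_and_resume is a made-up name.
 */
#if 0
static void example_quiesce_and_resume(struct task_struct *task)
{
	/* Returns once the thread really sits in TASK_PARKED. */
	if (!kthread_park(task)) {
		/* ... thread is guaranteed off-CPU here ... */
		kthread_unpark(task);	/* threadfn resumes after parkme */
	}
}
#endif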

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);
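
/*
 * Illustrative sketch (not part of this file): a full lifecycle using
 * kthread_run() and kthread_stop().  kthread_stop() blocks until the
 * thread exits and returns threadfn()'s return value, or -EINTR if the
 * thread was never woken.  example_threadfn is a made-up name.
 */
#if 0
static int example_lifecycle(void)
{
	struct task_struct *task;

	task = kthread_run(example_threadfn, NULL, "example");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* ... later, during teardown ... */
	return kthread_stop(task);	/* threadfn's return value */
}
#endif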

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker.  It
 * processes work_list until it is stopped with kthread_stop().  It
 * sleeps when the queue is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts
 * disabled when they finish.  There is a safe point for freezing after
 * one work finishes and before a new one is started.
 *
 * Also, the works must not be handled by more than one worker at the
 * same time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
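
/*
 * Illustrative sketch (not part of this file): creating a worker and
 * handing it a work item via kthread_init_work()/kthread_queue_work()
 * (defined below).  example_work, example_work_fn and example_setup
 * are made-up names.
 */
#if 0
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
	/* Runs in the worker's kthread context. */
}

static int example_setup(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&example_work, example_work_fn);
	kthread_queue_work(worker, &example_work);
	return 0;
}
#endif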

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to include the cpu number in the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work cannot be queued at the moment.
 * That happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  @worker must have been
 * created with kthread_create_worker().  Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
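
/*
 * Illustrative sketch (not part of this file): one worker per online
 * CPU, each bound to its CPU, with the cpu number in the comm as the
 * kerneldoc above recommends.  Hotplug handling is omitted for
 * brevity; example_workers and example_start_percpu are made-up names.
 */
#if 0
static struct kthread_worker *example_workers[NR_CPUS];

static int example_start_percpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		example_workers[cpu] =
			kthread_create_worker_on_cpu(cpu, 0, "example/%d", cpu);
		if (IS_ERR(example_workers[cpu]))
			return PTR_ERR(example_workers[cpu]);
	}
	return 0;
}
#endif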

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irqsafe timer with irqs already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work is not pending, it starts a timer that will queue
 * the work after the given @delay.  If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending, which means that
 * either the timer was running or the work was queued.  It returns
 * %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
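
/*
 * Illustrative sketch (not part of this file): initializing and queuing
 * a delayed work.  kthread_init_delayed_work() wires dwork->timer up to
 * kthread_delayed_work_timer_fn(), which the WARN_ON_ONCE above checks.
 * example_dwork, example_timeout_fn and example_arm are made-up names.
 */
#if 0
static struct kthread_delayed_work example_dwork;

static void example_timeout_fn(struct kthread_work *work)
{
	/* Runs in the worker roughly 100ms after queuing. */
}

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_timeout_fn);
	kthread_queue_delayed_work(worker, &example_dwork,
				   msecs_to_jiffies(100));
}
#endif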

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue.  Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes.  See the
 * current_work being processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid a deadlock with the callback.  In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list.  It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}
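
/*
 * Illustrative sketch (not part of this file): kthread_flush_work() is
 * the barrier to use before recycling a work item's backing memory
 * while the worker itself keeps running.  example_retire is a made-up
 * name; example_work is assumed to have been queued earlier.
 */
#if 0
static void example_retire(struct kthread_work *example_work)
{
	/* Wait out a queued or in-flight execution, then reuse/free. */
	kthread_flush_work(example_work);
}
#endif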

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work().  Otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call.  We let the other command
 * win and return %false here.  The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
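
/*
 * Illustrative sketch (not part of this file): using
 * kthread_mod_delayed_work() as a re-armable deadline, the same way
 * mod_delayed_work() is used with workqueues, plus the matching
 * synchronous cancel on teardown.  example_* names are made up.
 */
#if 0
static void example_touch_watchdog(struct kthread_worker *worker,
				   struct kthread_delayed_work *dwork)
{
	/* Push the expiry out to one second from now, queued or not. */
	kthread_mod_delayed_work(worker, dwork, HZ);
}

static void example_teardown(struct kthread_delayed_work *dwork)
{
	/* Guaranteed neither pending nor running after this returns. */
	kthread_cancel_delayed_work_sync(dwork);
}
#endif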

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step
 * state machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread.  The thread is running jobs on
 * behalf of other threads.  In some cases, we expect the jobs to attach
 * the cgroup info of the original threads instead of that of the current
 * thread.  This function stores the original thread's cgroup info in the
 * current kthread context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif
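
/*
 * Illustrative sketch (not part of this file): a loop-device-style
 * kthread doing I/O on behalf of another thread would bracket the work
 * with kthread_associate_blkcg(), so the charged blkcg is the
 * submitter's rather than the kthread's.  example_do_io_for is a
 * made-up name.
 */
#if 0
static void example_do_io_for(struct cgroup_subsys_state *submitter_css)
{
	kthread_associate_blkcg(submitter_css);	/* takes a css reference */
	/* ... submit bios; kthread_blkcg() now reports submitter_css ... */
	kthread_associate_blkcg(NULL);		/* drops the reference */
}
#endif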