/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
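
/*
 * Illustrative sketch (not part of the original file): a typical thread
 * function written against the helpers above. It runs until kthread_stop()
 * is called on it; the value it returns is handed back to the
 * kthread_stop() caller. example_threadfn and example_do_work are
 * hypothetical names.
 *
 *	static int example_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
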
static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_INTERRUPTIBLE);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
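
/*
 * Illustrative sketch (not part of the original file): creating and starting
 * a kthread with the helper above. kthread_run() is the convenience wrapper
 * from <linux/kthread.h> that combines creation with wake_up_process().
 * tsk, example_threadfn, example_dev and example_id are hypothetical.
 *
 *	tsk = kthread_create_on_node(example_threadfn, example_dev, -1,
 *				     "example/%d", example_id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *
 * or, equivalently:
 *
 *	tsk = kthread_run(example_threadfn, example_dev, "example/%d",
 *			  example_id);
 */
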
static void __kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	__kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}
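
/*
 * Illustrative sketch (not part of the original file): binding a freshly
 * created (still stopped) kthread to one CPU before waking it, as described
 * for kthread_bind() above. tsk, example_threadfn and example_data are
 * hypothetical.
 *
 *	tsk = kthread_create(example_threadfn, example_data, "example/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */
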
static struct kthread *task_get_live_kthread(struct task_struct *k)
{
	struct kthread *kthread;

	get_task_struct(k);
	kthread = to_kthread(k);
	/* It might have exited */
	barrier();
	if (k->vfork_done != NULL)
		return kthread;
	return NULL;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.
 * If the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);

	if (kthread) {
		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		/*
		 * We clear the IS_PARKED bit here as we don't wait
		 * until the task has left the park code. So if we'd
		 * park before that happens we'd see the IS_PARKED bit
		 * which might be about to be cleared.
		 */
		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
				__kthread_bind(k, kthread->cpu);
			wake_up_process(k);
		}
	}
	put_task_struct(k);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	put_task_struct(k);
	return ret;
}

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret;

	trace_sched_kthread_stop(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
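
/*
 * Illustrative sketch (not part of the original file): the stop side of the
 * protocol documented above. The creator keeps the task_struct returned by
 * kthread_run() and, on teardown, calls kthread_stop(); the thread
 * function's return value comes back as kthread_stop()'s result.
 * tsk, err, example_threadfn and example_dev are hypothetical.
 *
 *	tsk = kthread_run(example_threadfn, example_dev, "example");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *
 * and later, on teardown:
 *
 *	err = kthread_stop(tsk);
 */
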
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call this
 * function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
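
/*
 * Illustrative sketch (not part of the original file): using
 * kthread_worker_fn() as the @threadfn of a dedicated worker thread, as
 * described above. It assumes the init_kthread_worker() and
 * init_kthread_work() helpers from <linux/kthread.h>; example_worker,
 * example_work, example_work_fn and worker_task are hypothetical.
 *
 *	static struct kthread_worker example_worker;
 *	static struct kthread_work example_work;
 *	static struct task_struct *worker_task;
 *
 *	init_kthread_worker(&example_worker);
 *	init_kthread_work(&example_work, example_work_fn);
 *	worker_task = kthread_run(kthread_worker_fn, &example_worker,
 *				  "example_worker");
 */
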
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed by @worker for async execution. @worker
 * must have been initialized with init_kthread_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
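
/*
 * Illustrative sketch (not part of the original file): queueing work to a
 * kthread_worker and flushing it with the helpers above. This continues the
 * hypothetical example_worker/example_work setup sketched earlier;
 * flush_kthread_work() waits for that one work item, flush_kthread_worker()
 * waits for everything queued so far.
 *
 *	queue_kthread_work(&example_worker, &example_work);
 *
 *	flush_kthread_work(&example_work);
 *	flush_kthread_worker(&example_worker);
 */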