/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;

        struct list_head list;
};

struct kthread {
        int should_stop;
        void *data;
        struct completion exited;
};

#define to_kthread(tsk) \
        container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
        return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.should_stop = 0;
        self.data = data;
        init_completion(&self.exited);
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(&create->done);
        schedule();

        ret = -EINTR;
        if (!self.should_stop)
                ret = threadfn(data);

        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}
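/*
 * Illustrative usage sketch (not part of this file): a typical @threadfn
 * loops on kthread_should_stop() and sleeps between units of work.  The
 * set_current_state()/recheck/schedule() ordering avoids missing the
 * wakeup issued by kthread_stop(); returning 0 hands that value back to
 * kthread_stop().  my_thread_fn() and do_one_unit_of_work() are
 * hypothetical names used only for this sketch.
 *
 *      static int my_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      do_one_unit_of_work(data);
 *                      set_current_state(TASK_INTERRUPTIBLE);
 *                      if (!kthread_should_stop())
 *                              schedule();
 *                      __set_current_state(TASK_RUNNING);
 *              }
 *              return 0;
 *      }
 */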
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data,
                                           int node,
                                           const char namefmt[],
                                           ...)
{
        struct kthread_create_info create;

        create.threadfn = threadfn;
        create.data = data;
        create.node = node;
        init_completion(&create.done);

        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
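/*
 * Illustrative usage sketch (not part of this file): creating a thread
 * with NUMA-aware stack placement and binding it to a CPU before its
 * first wakeup, as described above for kthread_create_on_node() and
 * kthread_bind().  my_thread_fn() and the "my_thread/%d" name are
 * hypothetical.
 *
 *      static int my_start_on_cpu(unsigned int cpu)
 *      {
 *              struct task_struct *p;
 *
 *              p = kthread_create_on_node(my_thread_fn, NULL,
 *                                         cpu_to_node(cpu),
 *                                         "my_thread/%d", cpu);
 *              if (IS_ERR(p))
 *                      return PTR_ERR(p);
 *              kthread_bind(p, cpu);
 *              wake_up_process(p);
 *              return 0;
 *      }
 */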
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);
        get_task_struct(k);

        kthread = to_kthread(k);
        barrier(); /* it might have exited */
        if (k->vfork_done != NULL) {
                kthread->should_stop = 1;
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;

        put_task_struct(k);
        trace_sched_kthread_stop_ret(ret);

        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_HIGH_MEMORY]);

        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);
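/*
 * Illustrative usage sketch (not part of this file): the common
 * create/stop lifecycle.  kthread_run() from <linux/kthread.h> wraps
 * kthread_create() plus wake_up_process(); kthread_stop() later reaps
 * the thread and returns my_thread_fn()'s return value.  my_thread_fn
 * and my_data are hypothetical.
 *
 *      struct task_struct *tsk;
 *      int ret;
 *
 *      tsk = kthread_run(my_thread_fn, my_data, "my_thread");
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 *      ...
 *      ret = kthread_stop(tsk);
 */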
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
                smp_wmb();      /* wmb worker-b0 paired with flush-b1 */
                work->done_seq = work->queue_seq;
                smp_mb();       /* mb worker-b1 paired with flush-b0 */
                if (atomic_read(&work->flushing))
                        wake_up_all(&work->done);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been initialized, e.g. with init_kthread_worker().  Returns
 * %true if @work was successfully queued, %false if it was already
 * pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &worker->work_list);
                work->queue_seq++;
                if (likely(worker->task))
                        wake_up_process(worker->task);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        int seq = work->queue_seq;

        atomic_inc(&work->flushing);

        /*
         * mb flush-b0 paired with worker-b1, to make sure either
         * worker sees the above increment or we see done_seq update.
         */
        smp_mb__after_atomic_inc();

        /* A - B <= 0 tests whether B is in front of A regardless of overflow */
        wait_event(work->done, seq - work->done_seq <= 0);
        atomic_dec(&work->flushing);

        /*
         * rmb flush-b1 paired with worker-b0, to make sure our caller
         * sees every change made by work->func().
         */
        smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

struct kthread_flush_work {
        struct kthread_work     work;
        struct completion       done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}
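/*
 * Illustrative usage sketch (not part of this file): driving a
 * kthread_worker.  The worker is initialized, a thread is started on
 * kthread_worker_fn(), and a work item is queued and then flushed as
 * described above.  my_worker, my_worker_task, my_work and my_work_fn
 * are hypothetical names; init_kthread_worker(), init_kthread_work()
 * and kthread_run() come from <linux/kthread.h>.
 *
 *      static struct kthread_worker my_worker;
 *      static struct kthread_work my_work;
 *      static struct task_struct *my_worker_task;
 *
 *      static void my_work_fn(struct kthread_work *work)
 *      {
 *              pr_info("my_work ran\n");
 *      }
 *
 *      static int my_setup(void)
 *      {
 *              init_kthread_worker(&my_worker);
 *              my_worker_task = kthread_run(kthread_worker_fn, &my_worker,
 *                                           "my_worker");
 *              if (IS_ERR(my_worker_task))
 *                      return PTR_ERR(my_worker_task);
 *              init_kthread_work(&my_work, my_work_fn);
 *              queue_kthread_work(&my_worker, &my_work);
 *              flush_kthread_work(&my_work);
 *              return 0;
 *      }
 */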
/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
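/*
 * Illustrative usage sketch (not part of this file): tearing down a
 * kthread_worker.  flush_kthread_worker() drains everything queued so
 * far, and kthread_stop() then makes kthread_worker_fn() detach from
 * the worker and return.  my_worker and my_worker_task are the
 * hypothetical names from the sketch above kthread_worker_fn().
 *
 *      static void my_teardown(void)
 *      {
 *              flush_kthread_worker(&my_worker);
 *              kthread_stop(my_worker_task);
 *      }
 */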