Lines matching refs:worker in kernel/kthread.c
938 void __kthread_init_worker(struct kthread_worker *worker, in __kthread_init_worker() argument
942 memset(worker, 0, sizeof(struct kthread_worker)); in __kthread_init_worker()
943 raw_spin_lock_init(&worker->lock); in __kthread_init_worker()
944 lockdep_set_class_and_name(&worker->lock, key, name); in __kthread_init_worker()
945 INIT_LIST_HEAD(&worker->work_list); in __kthread_init_worker()
946 INIT_LIST_HEAD(&worker->delayed_work_list); in __kthread_init_worker()
967 struct kthread_worker *worker = worker_ptr; in kthread_worker_fn() local
974 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
975 worker->task = current; in kthread_worker_fn()
977 if (worker->flags & KTW_FREEZABLE) in kthread_worker_fn()
985 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
986 worker->task = NULL; in kthread_worker_fn()
987 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
992 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
993 if (!list_empty(&worker->work_list)) { in kthread_worker_fn()
994 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
998 worker->current_work = work; in kthread_worker_fn()
999 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
1032 struct kthread_worker *worker; in __kthread_create_worker_on_node() local
1035 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in __kthread_create_worker_on_node()
1036 if (!worker) in __kthread_create_worker_on_node()
1039 kthread_init_worker(worker); in __kthread_create_worker_on_node()
1041 task = __kthread_create_on_node(kthread_worker_fn, worker, in __kthread_create_worker_on_node()
1046 worker->flags = flags; in __kthread_create_worker_on_node()
1047 worker->task = task; in __kthread_create_worker_on_node()
1049 return worker; in __kthread_create_worker_on_node()
1052 kfree(worker); in __kthread_create_worker_on_node()
1069 struct kthread_worker *worker; in kthread_create_worker_on_node() local
1073 worker = __kthread_create_worker_on_node(flags, node, namefmt, args); in kthread_create_worker_on_node()
1076 return worker; in kthread_create_worker_on_node()
1120 struct kthread_worker *worker; in kthread_create_worker_on_cpu() local
1122 worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu); in kthread_create_worker_on_cpu()
1123 if (!IS_ERR(worker)) in kthread_create_worker_on_cpu()
1124 kthread_bind(worker->task, cpu); in kthread_create_worker_on_cpu()
1126 return worker; in kthread_create_worker_on_cpu()
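
The creation path above kzalloc()s the worker, initializes it, spawns kthread_worker_fn() as its task, and, in the _on_cpu variant, binds that task to the requested CPU. A minimal caller-side sketch, assuming a kernel-module context; the demo_* names are hypothetical:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *demo_worker;

static int demo_create(void)
{
	/* Spawns a task running kthread_worker_fn(); may return ERR_PTR. */
	demo_worker = kthread_create_worker(0, "demo_worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);
	return 0;
}
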
1135 static inline bool queuing_blocked(struct kthread_worker *worker, in queuing_blocked() argument
1138 lockdep_assert_held(&worker->lock); in queuing_blocked()
1143 static void kthread_insert_work_sanity_check(struct kthread_worker *worker, in kthread_insert_work_sanity_check() argument
1146 lockdep_assert_held(&worker->lock); in kthread_insert_work_sanity_check()
1149 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
1153 static void kthread_insert_work(struct kthread_worker *worker, in kthread_insert_work() argument
1157 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
1159 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
1162 work->worker = worker; in kthread_insert_work()
1163 if (!worker->current_work && likely(worker->task)) in kthread_insert_work()
1164 wake_up_process(worker->task); in kthread_insert_work()
1179 bool kthread_queue_work(struct kthread_worker *worker, in kthread_queue_work() argument
1185 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_work()
1186 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1187 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
1190 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_work()
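
kthread_queue_work() (lines 1179-1190) takes worker->lock, rejects work that is already pending via queuing_blocked(), and kthread_insert_work() wakes the worker task only if it is idle. A hedged usage sketch with the same hypothetical demo_* naming:

#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	/*
	 * Runs in the worker task's context (kthread_worker_fn() above);
	 * a given worker executes one work item at a time.
	 */
	pr_info("demo work ran\n");
}

static void demo_queue(struct kthread_worker *worker)
{
	/* One-time initialization, shown inline for brevity. */
	kthread_init_work(&demo_work, demo_work_fn);

	/* Returns false when the work is already pending on a worker. */
	if (!kthread_queue_work(worker, &demo_work))
		pr_debug("demo work already queued\n");
}
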
1208 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn() local
1215 if (WARN_ON_ONCE(!worker)) in kthread_delayed_work_timer_fn()
1218 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1220 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1226 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1228 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1232 static void __kthread_queue_delayed_work(struct kthread_worker *worker, in __kthread_queue_delayed_work() argument
1248 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1253 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1255 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1256 work->worker = worker; in __kthread_queue_delayed_work()
1276 bool kthread_queue_delayed_work(struct kthread_worker *worker, in kthread_queue_delayed_work() argument
1284 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_delayed_work()
1286 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
1287 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_queue_delayed_work()
1291 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_delayed_work()
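
The delayed variant (lines 1232-1291) does not queue immediately: __kthread_queue_delayed_work() parks the work on worker->delayed_work_list and arms a timer whose callback, kthread_delayed_work_timer_fn() (line 1208), later moves it onto work_list. A caller-side sketch, hypothetical names again:

#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_delayed_work demo_dwork;

static void demo_queue_delayed(struct kthread_worker *worker,
			       kthread_work_func_t fn)
{
	kthread_init_delayed_work(&demo_dwork, fn);

	/* With delay == 0 this degenerates to an immediate queue. */
	kthread_queue_delayed_work(worker, &demo_dwork,
				   msecs_to_jiffies(100));
}
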
1320 struct kthread_worker *worker; in kthread_flush_work() local
1323 worker = work->worker; in kthread_flush_work()
1324 if (!worker) in kthread_flush_work()
1327 raw_spin_lock_irq(&worker->lock); in kthread_flush_work()
1329 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1332 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1333 else if (worker->current_work == work) in kthread_flush_work()
1334 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
1335 worker->work_list.next); in kthread_flush_work()
1339 raw_spin_unlock_irq(&worker->lock); in kthread_flush_work()
1358 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer() local
1367 raw_spin_unlock_irqrestore(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1369 raw_spin_lock_irqsave(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1423 bool kthread_mod_delayed_work(struct kthread_worker *worker, in kthread_mod_delayed_work() argument
1431 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_mod_delayed_work()
1434 if (!work->worker) { in kthread_mod_delayed_work()
1440 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1463 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_mod_delayed_work()
1465 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_mod_delayed_work()
1472 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync() local
1476 if (!worker) in __kthread_cancel_work_sync()
1479 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1481 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1488 if (worker->current_work != work) in __kthread_cancel_work_sync()
1496 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
1498 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1502 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
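
kthread_mod_delayed_work() (lines 1423-1465) re-targets a pending timer under worker->lock, while __kthread_cancel_work_sync() (lines 1472-1502) additionally drops the lock to wait out a callback already running as worker->current_work. The exported wrappers might be used like this (demo_rearm_or_stop is a hypothetical helper):

#include <linux/jiffies.h>
#include <linux/kthread.h>

static void demo_rearm_or_stop(struct kthread_worker *worker,
			       struct kthread_delayed_work *dwork,
			       bool stop)
{
	if (stop) {
		/* Blocks until a running callback, if any, completes. */
		kthread_cancel_delayed_work_sync(dwork);
		return;
	}

	/* Pushes the timeout out to 500 ms, queueing the work if idle. */
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(500));
}
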
1551 void kthread_flush_worker(struct kthread_worker *worker) in kthread_flush_worker() argument
1558 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
1575 void kthread_destroy_worker(struct kthread_worker *worker) in kthread_destroy_worker() argument
1579 task = worker->task; in kthread_destroy_worker()
1583 kthread_flush_worker(worker); in kthread_destroy_worker()
1585 WARN_ON(!list_empty(&worker->delayed_work_list)); in kthread_destroy_worker()
1586 WARN_ON(!list_empty(&worker->work_list)); in kthread_destroy_worker()
1587 kfree(worker); in kthread_destroy_worker()
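
Teardown mirrors creation: kthread_destroy_worker() (lines 1575-1587) flushes the worker itself, stops its task, warns if either work list is still non-empty, and kfree()s the allocation. A matching exit-path sketch for the hypothetical demo_worker above:

#include <linux/kthread.h>

static void demo_destroy(void)
{
	/*
	 * kthread_destroy_worker() already flushes (line 1583 above),
	 * then stops the task and frees the worker; queueing to
	 * demo_worker after this point would be a use-after-free.
	 */
	kthread_destroy_worker(demo_worker);
	demo_worker = NULL;
}
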