| /linux/kernel/sched/ |
| H A D | completion.c | 153 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); in wait_for_completion()
|       |              | 172 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); in wait_for_completion_timeout()
|       |              | 186 wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); in wait_for_completion_io()
|       |              | 206 return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE); in wait_for_completion_io_timeout()
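These hits are the uninterruptible completion API. A minimal usage sketch, with invented names (work_done, consumer, producer), might look like this:

```c
#include <linux/completion.h>

/* Illustrative only: the names below are not from the kernel tree. */
static DECLARE_COMPLETION(work_done);

static void consumer(void)
{
        /* Sleeps in TASK_UNINTERRUPTIBLE until complete() is called;
         * wait_for_completion_timeout() would instead return the
         * remaining jiffies, or 0 on timeout. */
        wait_for_completion(&work_done);
}

static void producer(void)
{
        complete(&work_done);   /* wake one waiter */
}
```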
|
| /linux/include/linux/ |
| H A D | rcupdate_wait.h | 35 #define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__) 60 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__)
|
| H A D | wait.h | 330 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
|       |        | 354 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
|       |        | 395 TASK_UNINTERRUPTIBLE, 0, timeout, \
|       |        | 445 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
|       |        | 458 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
|       |        | 592 TASK_UNINTERRUPTIBLE); \
|       |        | 1019 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
|       |        | 1203 TASK_UNINTERRUPTIBLE); \
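These wait.h hits are the uninterruptible wait_event*() macro family. A hedged sketch of typical usage (queue, flag, and function names are invented):

```c
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static void example_waiter(void)
{
        /* Sleeps in TASK_UNINTERRUPTIBLE until the condition becomes true. */
        wait_event(example_wq, example_ready);

        /* Timeout variant: returns 0 on timeout, remaining jiffies otherwise. */
        if (!wait_event_timeout(example_wq, example_ready, msecs_to_jiffies(100)))
                pr_debug("example: condition not met within 100 ms\n");
}

static void example_waker(void)
{
        example_ready = true;
        wake_up(&example_wq);   /* wakes TASK_NORMAL waiters, i.e. both sleep states */
}
```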
|
| H A D | swait.h | 183 (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ 195 TASK_UNINTERRUPTIBLE, timeout, \
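swait is the restricted "simple waitqueue" used by core code such as RCU and KVM; the TASK_UNINTERRUPTIBLE entries above back its exclusive, uninterruptible wait variants. A sketch with invented names:

```c
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_swq);
static bool example_done;

static void example_swait(void)
{
        /* Exclusive, uninterruptible wait; one waiter is woken per swake_up_one(). */
        swait_event_exclusive(example_swq, example_done);
}

static void example_swake(void)
{
        example_done = true;
        swake_up_one(&example_swq);
}
```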
|
| H A D | sched.h | 107 #define TASK_UNINTERRUPTIBLE 0x00000002 macro
|       |         | 135 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
|       |         | 139 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
|       |         | 142 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
|       |         | 146 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
|       |         | 1681 state = TASK_UNINTERRUPTIBLE; in __task_state_index()
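sched.h defines the flag itself (0x00000002) and the composite states that OR it with modifiers. As a hedged illustration of what the composites mean (the function below is invented, not kernel code):

```c
#include <linux/sched.h>

static void example_state_flavours(void)
{
        /* Plain uninterruptible sleep: ignores all signals. */
        set_current_state(TASK_UNINTERRUPTIBLE);

        /* TASK_KILLABLE = TASK_WAKEKILL | TASK_UNINTERRUPTIBLE:
         * still uninterruptible, but a fatal signal breaks the wait. */
        set_current_state(TASK_KILLABLE);

        /* TASK_IDLE = TASK_UNINTERRUPTIBLE | TASK_NOLOAD:
         * the same sleep, but not counted in the load average. */
        set_current_state(TASK_IDLE);

        __set_current_state(TASK_RUNNING);      /* restore before returning */
}
```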
|
| /linux/lib/ |
| H A D | closure.c | 143 set_current_state(TASK_UNINTERRUPTIBLE); in __closure_sync()
|       |           | 174 set_current_state(TASK_UNINTERRUPTIBLE); in closure_return_sync()
|       |           | 197 set_current_state(TASK_UNINTERRUPTIBLE); in __closure_sync_timeout()
|
| H A D | sys_info.c | 160 show_state_filter(TASK_UNINTERRUPTIBLE); in __sys_info()
|
| /linux/kernel/locking/ |
| H A D | rwsem.c | 1069 if (state == TASK_UNINTERRUPTIBLE) in rwsem_down_read_slowpath()
|       |         | 1091 if (state == TASK_UNINTERRUPTIBLE) in rwsem_down_read_slowpath()
|       |         | 1155 if (state == TASK_UNINTERRUPTIBLE) in rwsem_down_write_slowpath()
|       |         | 1192 if (state == TASK_UNINTERRUPTIBLE) in rwsem_down_write_slowpath()
|       |         | 1274 __down_read_common(sem, TASK_UNINTERRUPTIBLE); in __down_read()
|       |         | 1326 __down_write_common(sem, TASK_UNINTERRUPTIBLE); in __down_write()
|       |         | 1466 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_read()
|       |         | 1491 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_write()
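These slow paths are where down_read()/down_write() end up when the rwsem is contended; both pass TASK_UNINTERRUPTIBLE, while the _killable variants pass a different state. A usage sketch with an invented lock:

```c
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);

static void example_reader(void)
{
        down_read(&example_rwsem);      /* sleeps uninterruptibly while a writer holds it */
        /* ... read-side critical section ... */
        up_read(&example_rwsem);
}

static void example_writer(void)
{
        down_write(&example_rwsem);     /* sleeps uninterruptibly until exclusive */
        /* ... write-side critical section ... */
        up_write(&example_rwsem);
}
```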
|
| H A D | semaphore.c | 303 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); in __down() 318 return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout); in __down_timeout()
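__down() and __down_timeout() back the plain down() and down_timeout() calls. A hedged sketch (the semaphore name is invented; it is initialised with sema_init() to stay version-neutral):

```c
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct semaphore example_sem;

static void example_sem_setup(void)
{
        sema_init(&example_sem, 1);     /* binary semaphore */
}

static int example_sem_user(void)
{
        down(&example_sem);             /* uninterruptible sleep while the count is zero */
        /* ... critical section ... */
        up(&example_sem);

        /* down_timeout() also sleeps uninterruptibly, but returns -ETIME
         * if the semaphore is not acquired within the timeout. */
        if (down_timeout(&example_sem, msecs_to_jiffies(50)))
                return -ETIME;
        up(&example_sem);
        return 0;
}
```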
|
| H A D | ww_rt_mutex.c | 81 return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_); in ww_mutex_lock()
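ww_rt_mutex.c is the PREEMPT_RT build of ww_mutex_lock(), which, as the hit shows, sleeps in TASK_UNINTERRUPTIBLE. A hedged usage sketch (class and lock names invented; the ww_mutex is assumed to have been set up elsewhere with ww_mutex_init()):

```c
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);
static struct ww_mutex example_obj_lock;        /* assume ww_mutex_init() ran at setup */

static void example_touch_obj(void)
{
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &example_ww_class);

        /* Blocks uninterruptibly; with several locks in one context it may
         * return -EDEADLK, asking the caller to back off and retry. */
        if (ww_mutex_lock(&example_obj_lock, &ctx) == 0) {
                /* ... touch the object ... */
                ww_mutex_unlock(&example_obj_lock);
        }

        ww_acquire_fini(&ctx);
}
```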
|
| /linux/rust/kernel/sync/ |
| H A D | condvar.rs | 13 MAX_SCHEDULE_TIMEOUT, TASK_FREEZABLE, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE, 146 self.wait_internal(TASK_UNINTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT); in wait()
|
| /linux/fs/jfs/ |
| H A D | jfs_lock.h | 28 set_current_state(TASK_UNINTERRUPTIBLE);\
|
| /linux/net/atm/ |
| H A D | svc.c | 61 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_disconnect()
|       |       | 137 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_bind()
|       |       | 308 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_listen()
|       |       | 401 TASK_UNINTERRUPTIBLE); in svc_accept()
|       |       | 445 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_change_qos()
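The ATM socket code uses the open-coded waitqueue pattern: prepare_to_wait() with TASK_UNINTERRUPTIBLE, re-check the condition, schedule(), then finish_wait(). A generic sketch of that loop (all names invented):

```c
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static bool example_cond;

static void example_wait_loop(void)
{
        DEFINE_WAIT(wait);

        /* Setting the task state before testing the condition is what
         * makes this pattern safe against a lost wake-up. */
        prepare_to_wait(&example_waitq, &wait, TASK_UNINTERRUPTIBLE);
        while (!example_cond) {
                schedule();
                prepare_to_wait(&example_waitq, &wait, TASK_UNINTERRUPTIBLE);
        }
        finish_wait(&example_waitq, &wait);
}
```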
|
| /linux/fs/netfs/ |
| H A D | misc.c | 364 prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); in netfs_wait_for_in_progress_stream()
|       |        | 440 prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); in netfs_wait_for_in_progress()
|       |        | 509 prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); in netfs_wait_for_pause()
|
| /linux/drivers/mtd/chips/ |
| H A D | cfi_cmdset_0020.c | 353 set_current_state(TASK_UNINTERRUPTIBLE); in do_read_onechip()
|       |                   | 486 set_current_state(TASK_UNINTERRUPTIBLE); in do_write_buffer()
|       |                   | 543 set_current_state(TASK_UNINTERRUPTIBLE); in do_write_buffer()
|       |                   | 779 set_current_state(TASK_UNINTERRUPTIBLE); in do_erase_oneblock()
|       |                   | 808 set_current_state(TASK_UNINTERRUPTIBLE); in do_erase_oneblock()
|       |                   | 1006 set_current_state(TASK_UNINTERRUPTIBLE); in cfi_staa_sync()
|       |                   | 1076 set_current_state(TASK_UNINTERRUPTIBLE); in do_lock_oneblock()
|       |                   | 1222 set_current_state(TASK_UNINTERRUPTIBLE); in do_unlock_oneblock()
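These MTD hits use the older manual pattern: set_current_state(TASK_UNINTERRUPTIBLE) followed by schedule() or schedule_timeout() while waiting on the chip. A generic sketch of the timeout-poll variant (the ready-check is a placeholder, not the driver's real status read):

```c
#include <linux/sched.h>
#include <linux/delay.h>

static bool example_hw_ready(void)
{
        return true;    /* placeholder for a real status-register read */
}

static void example_poll_hw(void)
{
        /* Sleep uninterruptibly in one-jiffy steps until the hardware is ready. */
        while (!example_hw_ready()) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
        }
        __set_current_state(TASK_RUNNING);
}
```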
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| H A D | igt_reset.c | 29 TASK_UNINTERRUPTIBLE); in igt_global_reset_lock()
|
| /linux/block/ |
| H A D | blk-rq-qos.c | 276 TASK_UNINTERRUPTIBLE); in rq_qos_wait() 308 set_current_state(TASK_UNINTERRUPTIBLE); in rq_qos_wait()
|
| /linux/Documentation/trace/rv/ |
| H A D | monitor_wwnr.rst | 33 1: set_current_state(TASK_UNINTERRUPTIBLE);
|
| /linux/tools/perf/util/bpf_skel/ |
| H A D | off_cpu.bpf.c | 13 #define TASK_UNINTERRUPTIBLE 0x0002 macro 177 state != TASK_UNINTERRUPTIBLE) in can_record()
|
| /linux/tools/testing/selftests/net/bench/page_pool/ |
| H A D | time_bench.c | 372 set_current_state(TASK_UNINTERRUPTIBLE); in time_bench_run_concurrent() 380 set_current_state(TASK_UNINTERRUPTIBLE); in time_bench_run_concurrent()
|
| /linux/fs/ |
| H A D | fs_pin.c | 54 set_current_state(TASK_UNINTERRUPTIBLE); in pin_kill()
|
| /linux/Documentation/translations/zh_CN/locking/ |
| H A D | mutex-design.rst | 67 sleeps until woken by the unlock path. In the common case, it sleeps in the TASK_UNINTERRUPTIBLE state
|
| /linux/drivers/media/i2c/ |
| H A D | saa7110.c | 186 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm() 221 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm()
|
| /linux/kernel/time/ |
| H A D | sleep_timeout.c | 157 __set_current_state(TASK_UNINTERRUPTIBLE); in schedule_timeout_uninterruptible()
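schedule_timeout_uninterruptible() sets TASK_UNINTERRUPTIBLE itself before calling schedule_timeout(), so callers get a fixed-length uninterruptible sleep in one call. A short sketch (function name invented):

```c
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

static void example_fixed_pause(void)
{
        /* Sleep ~10 ms without being woken by signals; cannot return early. */
        schedule_timeout_uninterruptible(msecs_to_jiffies(10));

        /* msleep() is the usual shorthand for millisecond pauses and is
         * built on the same uninterruptible timeout sleep. */
        msleep(10);
}
```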
|
| /linux/kernel/ |
| H A D | kthread.c | 443 __set_current_state(TASK_UNINTERRUPTIBLE); in kthread()
|       |           | 616 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE); in kthread_bind_mask()
|       |           | 632 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE); in kthread_bind()
|       |           | 860 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) { in kthread_affine_preferred()
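kthread() parks a newly created kernel thread in TASK_UNINTERRUPTIBLE until it is first woken, which is also why the bind helpers pass the same state. A usage sketch with invented names:

```c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static int example_thread_fn(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(HZ);   /* periodic work would go here */
        return 0;
}

static struct task_struct *example_start_worker(void)
{
        struct task_struct *tsk;

        /* kthread_create() leaves the new thread sleeping uninterruptibly;
         * it only starts running after wake_up_process(). */
        tsk = kthread_create(example_thread_fn, NULL, "example_worker");
        if (IS_ERR(tsk))
                return tsk;

        kthread_bind(tsk, 0);   /* optional: pin to CPU 0 while it is still parked */
        wake_up_process(tsk);
        return tsk;
}
```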
|