Lines Matching +full:lock +full:- +full:state

1 /*-
81 spinlock_t lock; member
96 long linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout);
98 #define wait_woken(wq, state, timeout) \ argument
99 linux_wait_woken((wq), (state), (timeout))
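These matches appear to come from FreeBSD's LinuxKPI wait-queue compatibility header (linux/wait.h): lines 96-99 map wait_woken() onto linux_wait_woken(). A minimal sketch of the usual wait_woken() pattern, assuming the stock Linux companions DEFINE_WAIT_FUNC() and woken_wake_function are also provided by this header (they are not among the matched lines), with a hypothetical driver softc `sc` holding a wait queue head `wq` and a `ready` flag:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = HZ;	/* sleep for at most about one second */

	add_wait_queue(&sc->wq, &wait);
	while (!sc->ready && timeo > 0 && !signal_pending(current)) {
		/*
		 * Sleeps until the wait entry is marked woken, the timeout
		 * expires, or a signal arrives; returns the remaining
		 * timeout (stock Linux semantics).
		 */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
	}
	remove_wait_queue(&sc->wq, &wait);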
121 MTX_SYSINIT(name, &(name).lock, spin_lock_name("wqhead"), MTX_DEF)
124 mtx_init(&(wqh)->lock, spin_lock_name("wqhead"), \
126 INIT_LIST_HEAD(&(wqh)->task_list); \
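Lines 121-126 above are the two initialization paths for a wait_queue_head_t: the static declaration macro registers its embedded mutex through MTX_SYSINIT (so it only suits global heads), while init_waitqueue_head() sets the mutex up at run time. A sketch using the standard Linux names, with mydrv_softc as a hypothetical consumer that the later examples reuse:

	/* global head: the MTX_SYSINIT at line 121 initializes the lock at boot */
	static DECLARE_WAIT_QUEUE_HEAD(mydrv_global_wq);

	/* per-instance head, set up at attach time */
	struct mydrv_softc {
		wait_queue_head_t	wq;
		spinlock_t		lock;
		unsigned long		flags;
		int			ready;
	};

	static void
	mydrv_init(struct mydrv_softc *sc)
	{
		init_waitqueue_head(&sc->wq);
		spin_lock_init(&sc->lock);
		sc->flags = 0;
		sc->ready = 0;
	}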
153 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
157 #define __wait_event_common(wqh, cond, timeout, state, lock) ({ \ argument
164 linux_prepare_to_wait(&(wqh), &__wq, state); \
168 __timeout, state, lock); \
174 if (__ret == -EWOULDBLOCK) \
176 else if (__ret != -ERESTARTSYS) { \
177 __ret = __timeout + __start - jiffies; \
219 spin_unlock(&(wqh).lock); \
222 spin_lock(&(wqh).lock); \
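The comment at line 153 and the return handling at lines 174-177 describe the convention shared by the wait_event_*() macros built on __wait_event_common(); lines 219-222 belong to a *_locked variant that drops and retakes the wait-queue spinlock around the sleep. A usage sketch of the interruptible, timed form, reusing the hypothetical `sc` from above:

	long remaining;

	/* sleep until sc->ready becomes true, a signal arrives, or ~2 s pass */
	remaining = wait_event_interruptible_timeout(sc->wq, sc->ready, 2 * HZ);
	if (remaining == -ERESTARTSYS) {
		/* interrupted by a signal */
	} else if (remaining == 0) {
		/* timed out and the condition is still false */
	} else {
		/* condition is true; a positive value is the jiffies left over */
	}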
229 #define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \ argument
231 TASK_INTERRUPTIBLE, &(lock)); \
237 #define wait_event_lock_irq(wqh, cond, lock) ({ \ argument
239 TASK_UNINTERRUPTIBLE, &(lock)); \
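wait_event_interruptible_lock_irq() and wait_event_lock_irq() (lines 229-239) are for conditions protected by a spinlock: the caller enters with the lock held, the macro releases it while sleeping, and the lock is held again whenever the condition is re-checked and when the macro returns. A sketch against the hypothetical sc->lock:

	int error;

	spin_lock_irq(&sc->lock);
	error = wait_event_interruptible_lock_irq(sc->wq, sc->ready, sc->lock);
	/* the lock is held again here, whether we were woken or interrupted */
	if (error == 0)
		sc->ready = 0;		/* consume the event */
	spin_unlock_irq(&sc->lock);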
245 list_add(&wq->task_list, &wqh->task_list); in __add_wait_queue()
252 spin_lock(&wqh->lock); in add_wait_queue()
254 spin_unlock(&wqh->lock); in add_wait_queue()
260 list_add_tail(&wq->task_list, &wqh->task_list); in __add_wait_queue_tail()
266 list_add_tail(&wq->entry, &wqh->head); in __add_wait_queue_entry_tail()
272 list_del(&wq->task_list); in __remove_wait_queue()
279 spin_lock(&wqh->lock); in remove_wait_queue()
281 spin_unlock(&wqh->lock); in remove_wait_queue()
291 #define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state) argument
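add_wait_queue()/__add_wait_queue() (lines 245-260), the corresponding remove functions (lines 272-281) and prepare_to_wait() (line 291) are the building blocks for open-coded wait loops. A sketch of the classic prepare_to_wait()/schedule()/finish_wait() pattern; DEFINE_WAIT(), schedule(), signal_pending() and finish_wait() are the usual companions and are assumed to be available even though they are not among the matched lines:

	DEFINE_WAIT(wait);
	int error = 0;

	for (;;) {
		/* queue the entry and set the task state under the wq lock */
		prepare_to_wait(&sc->wq, &wait, TASK_INTERRUPTIBLE);
		if (sc->ready)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&sc->wq, &wait);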
300 #define wait_on_bit(word, bit, state) \ argument
301 linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
302 #define wait_on_bit_timeout(word, bit, state, timeout) \ argument
303 linux_wait_on_bit_timeout(word, bit, state, timeout)
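wait_on_bit() and wait_on_bit_timeout() (lines 300-303) sleep until a given bit in an unsigned long word is cleared; the untimed form simply passes MAX_SCHEDULE_TIMEOUT. A sketch in which MYDRV_BUSY is a hypothetical bit number in sc->flags, and the waker side follows the usual clear_bit()/wake_up_bit() pairing (assumed to be available here as in stock Linux):

	/* MYDRV_BUSY is a hypothetical bit number, e.g. #define MYDRV_BUSY 0 */
	int error;

	/* waiter: sleep until the bit is cleared, for at most ~100 ms */
	error = wait_on_bit_timeout(&sc->flags, MYDRV_BUSY,
	    TASK_UNINTERRUPTIBLE, msecs_to_jiffies(100));
	if (error != 0) {
		/* timed out (or was interrupted, for interruptible states) */
	}

	/* waker: clear the bit, then wake anyone sleeping on it */
	clear_bit(MYDRV_BUSY, &sc->flags);
	wake_up_bit(&sc->flags, MYDRV_BUSY);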
311 #define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state) argument
317 #define wake_up_state(task, state) linux_wake_up_state(task, state) argument
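wait_on_atomic_t() (line 311) waits on an atomic_t (in stock Linux, until it reaches zero), and wake_up_state() (line 317) wakes one specific task, but only if that task is currently sleeping in one of the states given in the mask. A minimal sketch, with sc->worker as a hypothetical struct task_struct pointer:

	/*
	 * Nudge the worker only if it is in an interruptible sleep; a task
	 * that is running, or sleeping uninterruptibly, is left alone.
	 */
	wake_up_state(sc->worker, TASK_INTERRUPTIBLE);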