/linux/include/asm-generic/
qrwlock.h
    40   * queued_read_trylock - try to acquire read lock of a queued rwlock
    41   * @lock : Pointer to queued rwlock structure
    59   * queued_write_trylock - try to acquire write lock of a queued rwlock
    60   * @lock : Pointer to queued rwlock structure
    75   * queued_read_lock - acquire read lock of a queued rwlock
    76   * @lock: Pointer to queued rwlock structure
    91   * queued_write_lock - acquire write lock of a queued rwlock
    92   * @lock : Pointer to queued rwlock structure
    105  * queued_read_unlock - release read lock of a queued rwlock
    106  * @lock : Pointer to queued rwlock structure
    [all …]
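
These hits are the kerneldoc for the generic queued-rwlock fast paths. A minimal sketch of how they are normally reached, assuming a kernel build where rwlock_t is backed by qrwlock (example_lock and the two functions are made-up names):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_lock);     /* hypothetical lock */

    static void example_reader(void)
    {
            read_lock(&example_lock);       /* fast path: queued_read_lock() */
            /* ... read shared state ... */
            read_unlock(&example_lock);     /* queued_read_unlock() */
    }

    static void example_writer(void)
    {
            write_lock(&example_lock);      /* fast path: queued_write_lock() */
            /* ... modify shared state ... */
            write_unlock(&example_lock);
    }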
|
qspinlock.h
    3    * Queued spinlock
    48   * @lock: Pointer to queued spinlock structure
    63   * @lock: queued spinlock structure
    78   * @lock : Pointer to queued spinlock structure
    86   * queued_spin_trylock - try to acquire the queued spinlock
    87   * @lock : Pointer to queued spinlock structure
    104  * queued_spin_lock - acquire a queued spinlock
    105  * @lock: Pointer to queued spinlock structure
    120  * queued_spin_unlock - release a queued spinlock
    121  * @lock : Pointer to queued spinlock structure
    [all …]
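
Similarly for the queued spinlock, a sketch of the trylock path named above, through the generic spinlock API (hypothetical lock name; on configs that select qspinlock, spin_trylock() lands in queued_spin_trylock()):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */

    static bool example_try(void)
    {
            if (!spin_trylock(&example_lock))   /* queued_spin_trylock() */
                    return false;               /* contended: caller backs off */
            /* ... short critical section ... */
            spin_unlock(&example_lock);         /* queued_spin_unlock() */
            return true;
    }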
|
/linux/security/integrity/ima/
ima_queue_keys.c
    18   * right away or should be queued for processing later.
    29   * If custom IMA policy is not loaded then keys queued up
    40   * queued up in case custom IMA policy was not loaded.
    49   * This function sets up a worker to free queued keys in case
    107  bool queued = false;  in ima_queue_key() local
    117  queued = true;  in ima_queue_key()
    121  if (!queued)  in ima_queue_key()
    124  return queued;  in ima_queue_key()
    128  * ima_process_queued_keys() - process keys queued for measurement
    130  * This function sets ima_process_keys to true and processes queued keys.
    [all …]
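
The ima_queue_key() hits outline a queue-or-measure-now decision. A condensed sketch of that pattern follows; the names and locking are simplified assumptions, not the actual IMA code:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);     /* stands in for IMA's mutex */
    static bool example_policy_loaded;      /* has the custom policy arrived? */

    /* example_enqueue(): hypothetical helper that appends to a list. */
    static void example_enqueue(const void *payload, size_t len) { }

    /* Returns true if the key was queued for later processing. */
    static bool example_queue_key(const void *payload, size_t len)
    {
            bool queued = false;

            mutex_lock(&example_mutex);
            if (!example_policy_loaded) {
                    example_enqueue(payload, len);
                    queued = true;
            }
            mutex_unlock(&example_mutex);

            return queued;  /* false: caller measures the key immediately */
    }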
|
/linux/drivers/net/wireless/mediatek/mt76/
debugfs.c
    59   seq_puts(s, " queue | hw-queued | head | tail |\n");  in mt76_queues_read()
    67   i, q->queued, q->head, q->tail);  in mt76_queues_read()
    77   int i, queued;  in mt76_rx_queues_read() local
    79   seq_puts(s, " queue | hw-queued | head | tail |\n");  in mt76_rx_queues_read()
    83   queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;  in mt76_rx_queues_read()
    85   i, queued, q->head, q->tail);  in mt76_rx_queues_read()
|
/linux/net/x25/
x25_in.c
    210  int queued = 0;  in x25_state3_machine() local
    277  queued = 1;  in x25_state3_machine()
    315  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
    319  queued = 1;  in x25_state3_machine()
    330  return queued;  in x25_state3_machine()
    418  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() local
    427  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
    430  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
    433  queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in x25_process_rx_frame()
    436  queued = x25_state4_machine(sk, skb, frametype);  in x25_process_rx_frame()
    [all …]
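
x25_process_rx_frame() illustrates the convention shared with the rose, ax25, and netrom entries below: each state handler returns nonzero only when it consumed (queued) the skb. A sketch of the caller side of that contract, with illustrative names:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    /* Hypothetical dispatcher in the mold of x25_process_rx_frame();
     * returns 1 when a state handler queued the skb somewhere. */
    static int example_state_machine(struct sock *sk, struct sk_buff *skb);

    static void example_rx(struct sock *sk, struct sk_buff *skb)
    {
            int queued = example_state_machine(sk, skb);

            if (!queued)
                    kfree_skb(skb);  /* no one kept the skb: free it here */
    }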
|
/linux/Documentation/userspace-api/media/mediactl/
media-request-ioc-queue.rst
    34   If the request was successfully queued, then the file descriptor can be
    37   If the request was already queued before, then ``EBUSY`` is returned.
    42   Once a request is queued, then the driver is required to gracefully handle
    49   queued directly and you next try to queue a request, or vice versa.
    62   The request was already queued or the application queued the first
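
A minimal userspace sketch of the queue step described here, assuming req_fd came from MEDIA_IOC_REQUEST_ALLOC on the media device:

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    static int queue_request(int req_fd)
    {
            /* MEDIA_REQUEST_IOC_QUEUE takes no argument. */
            if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
                    return -errno;  /* -EBUSY if it was already queued */
            return 0;               /* queued; poll() req_fd for completion */
    }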
|
request-api.rst
    24   buffer queues since in practice only one buffer would be queued at a time.
    59   instead of being immediately applied, and buffers queued to a request do not
    60   enter the regular buffer queue until the request itself is queued.
    66   queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
    68   A queued request cannot be modified anymore.
    86   a buffer was queued via a request or vice versa will result in an ``EBUSY``
    109  request that has been queued but not yet completed will return ``EBUSY``
    121  longer in use by the kernel. That is, if the request is queued and then the
    165  Once the request is fully prepared, it can be queued to the driver:
    245  Once the request is fully prepared, it can be queued to the driver:
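
And the buffer side of the same flow: a hedged sketch of queueing an ``OUTPUT`` buffer against a request, so it enters the regular queue only once the request itself is queued (buffer type and memory mode are assumptions):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int qbuf_to_request(int video_fd, int req_fd, unsigned int index)
    {
            struct v4l2_buffer buf;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;  /* assumed queue type */
            buf.memory = V4L2_MEMORY_MMAP;
            buf.index = index;
            buf.flags = V4L2_BUF_FLAG_REQUEST_FD;   /* buffer joins a request */
            buf.request_fd = req_fd;

            /* The buffer enters the regular queue only once the request
             * itself is queued with MEDIA_REQUEST_IOC_QUEUE. */
            return ioctl(video_fd, VIDIOC_QBUF, &buf);
    }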
|
/linux/net/rose/
rose_in.c
    106  int queued = 0;  in rose_state3_machine() local
    169  queued = 1;  in rose_state3_machine()
    206  return queued;  in rose_state3_machine()
    267  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() local
    276  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
    279  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
    282  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
    285  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
    288  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
    294  return queued;  in rose_process_rx_frame()
|
/linux/net/ax25/
ax25_std_in.c
    143  int queued = 0;  in ax25_std_state3_machine() local
    225  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
    258  return queued;  in ax25_std_state3_machine()
    268  int queued = 0;  in ax25_std_state4_machine() local
    380  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
    413  return queued;  in ax25_std_state4_machine()
    421  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() local
    427  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    430  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
    433  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
    [all …]
|
ax25_ds_in.c
    147  int queued = 0;  in ax25_ds_state3_machine() local
    240  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
    273  return queued;  in ax25_ds_state3_machine()
    281  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() local
    287  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    290  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
    293  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
    297  return queued;  in ax25_ds_frame_in()
|
ax25_in.c
    103  int queued = 0;  in ax25_rx_iframe() local
    145  queued = 1;  in ax25_rx_iframe()
    151  return queued;  in ax25_rx_iframe()
    159  int queued = 0;  in ax25_process_rx_frame() local
    167  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    173  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    175  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
    180  return queued;  in ax25_process_rx_frame()
    305  * Process the frame. If it is queued up internally it  in ax25_rcv()
|
/linux/tools/testing/selftests/net/
fq_band_pktlimit.sh
    7    # 2. send 20 pkts on band A: verify that 10 are queued, 10 dropped
    8    # 3. send 20 pkts on band A: verify that 0 are queued, 20 dropped
    9    # 4. send 20 pkts on band B: verify that 10 are queued, 10 dropped
    12   # packets are still queued when later ones are sent.
    44   # queued in FQ. Sleep for at least the delay period and see that
|
/linux/include/drm/
drm_flip_work.h
    46   * @val: value queued via drm_flip_work_queue()
    58   * @queued: queued tasks
    60   * @lock: lock to access queued and commited lists
    66   struct list_head queued;  member
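
A sketch of the @queued list in use via the drm_flip_work API this header declares; the callback body, names, and call placement are illustrative:

    #include <drm/drm_flip_work.h>

    static void example_cleanup(struct drm_flip_work *work, void *val)
    {
            /* runs from workqueue context for each committed value */
    }

    static struct drm_flip_work example_work;

    static void example_flip(struct workqueue_struct *wq, void *old_fb)
    {
            /* one-time init shown inline only for brevity */
            drm_flip_work_init(&example_work, "example", example_cleanup);

            drm_flip_work_queue(&example_work, old_fb);  /* -> @queued list */
            drm_flip_work_commit(&example_work, wq);     /* queued -> commited */
    }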
|
/linux/kernel/locking/
qspinlock.c
    3    * Queued spinlock
    28   * Include queued spinlock definitions and statistics code
    42   * This queued spinlock implementation is based on the MCS lock, however to
    110  * queued_spin_lock_slowpath - acquire the queued spinlock
    111  * @lock: Pointer to queued spinlock structure
    112  * @val: Current value of the queued spinlock 32-bit word
    358  * Either somebody is queued behind us or _Q_PENDING_VAL got set  in queued_spin_lock_slowpath()
|
qrwlock.c
    3    * Queued read/write locks
    18   * queued_read_lock_slowpath - acquire read lock of a queued rwlock
    19   * @lock: Pointer to queued rwlock structure
    63   * queued_write_lock_slowpath - acquire write lock of a queued rwlock
    64   * @lock : Pointer to queued rwlock structure
|
/linux/net/dccp/
input.c
/linux/Documentation/userspace-api/media/v4l/
vidioc-streamon.rst
    51   If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
    52   queued.
    63   If buffers have been queued with :ref:`VIDIOC_QBUF` and
    65   ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
    77   but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting
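
A userspace sketch of the semantics above (single-planar capture assumed):

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int start_streaming(int fd)
    {
            int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

            /* On failure, buffers queued with VIDIOC_QBUF remain queued. */
            return ioctl(fd, VIDIOC_STREAMON, &type);
    }

    static int stop_streaming(int fd)
    {
            int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

            /* Removes buffers from both queues, returning queued buffers
             * to their starting (dequeued) state. */
            return ioctl(fd, VIDIOC_STREAMOFF, &type);
    }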
|
dev-encoder.rst
    309  fast raw frames are queued on the ``OUTPUT`` queue.
    468  * a buffer queued to ``OUTPUT`` may result in more than one buffer produced on
    473  * a buffer queued to ``OUTPUT`` may result in a buffer being produced on
    479  buffers queued to ``OUTPUT`` (e.g. during drain or ``EOS``), because of the
    480  ``OUTPUT`` buffers queued in the past whose encoding results are only
    483  * buffers queued to ``OUTPUT`` may not become available to dequeue instantly
    549  sequence to avoid losing the already queued/encoded frames.
    559  To ensure that all the queued ``OUTPUT`` buffers have been processed and the
    562  received all encoded frames for all ``OUTPUT`` buffers queued before the
    586  2. Any ``OUTPUT`` buffers queued by the client before the
    [all …]
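
The drain sequence those hits refer to can be sketched as follows: issue V4L2_ENC_CMD_STOP, then keep dequeuing ``CAPTURE`` buffers until one carries V4L2_BUF_FLAG_LAST (single-planar and MMAP are assumptions). The decoder drain in dev-decoder.rst below is analogous, using V4L2_DEC_CMD_STOP via VIDIOC_DECODER_CMD.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int drain_encoder(int fd)
    {
            struct v4l2_encoder_cmd cmd;
            struct v4l2_buffer buf;

            memset(&cmd, 0, sizeof(cmd));
            cmd.cmd = V4L2_ENC_CMD_STOP;
            if (ioctl(fd, VIDIOC_ENCODER_CMD, &cmd) < 0)
                    return -1;

            /* Earlier OUTPUT buffers may still produce CAPTURE buffers,
             * so keep dequeuing until the flagged last one arrives. */
            do {
                    memset(&buf, 0, sizeof(buf));
                    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                    buf.memory = V4L2_MEMORY_MMAP;
                    if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
                            return -1;
            } while (!(buf.flags & V4L2_BUF_FLAG_LAST));

            return 0;
    }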
|
dev-decoder.rst
    84   ``OUTPUT`` buffers must be queued by the client in decode order; for
    92   buffers must be queued by the client in display order; for decoders,
    722  * a buffer queued to ``OUTPUT`` may result in no buffers being produced
    726  * a buffer queued to ``OUTPUT`` may result in more than one buffer produced
    731  * a buffer queued to ``OUTPUT`` may result in a buffer being produced on
    737  buffers queued to ``OUTPUT`` (e.g. during drain or ``EOS``), because of the
    738  ``OUTPUT`` buffers queued in the past whose decoding results are only
    756  the ``OUTPUT`` buffer queued first will be copied.
    771  client should make sure that each ``CAPTURE`` buffer is always queued with
    846  from a resume point (e.g. SPS or a keyframe). Any queued ``OUTPUT``
    [all …]
|
/linux/net/netrom/
nr_in.c
    153  int queued = 0;  in nr_state3_machine() local
    225  queued = 1;  in nr_state3_machine()
    272  return queued;  in nr_state3_machine()
    279  int queued = 0, frametype;  in nr_process_rx_frame() local
    288  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
    291  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
    294  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
    300  return queued;  in nr_process_rx_frame()
|
/linux/tools/testing/selftests/net/mptcp/
mptcp_inq.c
    217  int nsd, ret, queued = -1;  in wait_for_ack() local
    220  ret = ioctl(fd, TIOCOUTQ, &queued);  in wait_for_ack()
    228  if ((size_t)queued > total)  in wait_for_ack()
    229  xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total);  in wait_for_ack()
    230  assert(nsd <= queued);  in wait_for_ack()
    232  if (queued == 0)  in wait_for_ack()
    241  xerror("still tx data queued after %u ms\n", timeout);  in wait_for_ack()
    365  unsigned int queued;  in process_one_client() local
    367  ret = ioctl(fd, FIONREAD, &queued);  in process_one_client()
    370  if (queued > expect_le  in process_one_client()
    [all …]
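
wait_for_ack() above polls TIOCOUTQ until the socket's send queue drains. A simplified standalone sketch of the same loop (retry count and backoff are arbitrary values):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    static int wait_send_queue_empty(int fd, int tries)
    {
            int queued = -1;

            while (tries-- > 0) {
                    if (ioctl(fd, TIOCOUTQ, &queued) < 0)
                            return -1;
                    if (queued == 0)
                            return 0;       /* all data sent and acked */
                    usleep(50 * 1000);      /* arbitrary 50 ms backoff */
            }
            fprintf(stderr, "still %d bytes queued\n", queued);
            return -1;
    }
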
/linux/tools/testing/selftests/bpf/progs/
bpf_arena_spin_lock.h
    140  * @lock : Pointer to queued spinlock structure
    174  * @lock: Pointer to queued spinlock structure
    185  * @lock: Pointer to queued spinlock structure
    198  * @lock: Pointer to queued spinlock structure
    229  * arena_spin_trylock - try to acquire the queued spinlock
    230  * @lock : Pointer to queued spinlock structure
    434  * Either somebody is queued behind us or _Q_PENDING_VAL got set  in arena_spin_lock_slowpath()
    470  * arena_spin_lock - acquire a queued spinlock
    471  * @lock: Pointer to queued spinlock structure
    512  * arena_spin_unlock - release a queued spinlock
    [all …]
|
/linux/virt/kvm/
async_pf.c
    111  * need to be flushed (but sanity check that the work wasn't queued).  in kvm_flush_and_free_async_pf_work()
    150  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
    170  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
    184  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
    208  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
    239  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()
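
The queued counter hits show per-vCPU bounded queueing of async page faults. A stripped-down sketch of that cap check (names and the constant are simplified stand-ins, not the KVM code itself):

    /* EXAMPLE_PF_PER_VCPU stands in for ASYNC_PF_PER_VCPU. */
    #define EXAMPLE_PF_PER_VCPU 64

    struct example_vcpu {
            unsigned int queued;    /* outstanding async page faults */
    };

    static bool example_setup_async_pf(struct example_vcpu *vcpu)
    {
            if (vcpu->queued >= EXAMPLE_PF_PER_VCPU)
                    return false;   /* cap hit: handle the fault synchronously */

            /* ... allocate the work item and schedule it ... */
            vcpu->queued++;
            return true;
    }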
|
/linux/include/trace/events/
workqueue.h
    14   * workqueue_queue_work - called when a work gets queued
    19   * This event occurs when a work is queued immediately or once a
    20   * delayed work is actually queued on a workqueue (ie: once the delay
    55   * This event occurs when a queued work is put on the active queue,
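
For context, a minimal work-item sketch: queueing it is what fires the workqueue_queue_work event described above (real workqueue API, hypothetical work item):

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
            /* runs later in worker context */
    }

    static DECLARE_WORK(example_work, example_fn);

    static void example_kick(void)
    {
            schedule_work(&example_work);   /* emits workqueue_queue_work */
    }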
|
/linux/Documentation/usb/
ohci.rst
    22   - interrupt transfers can be larger, and can be queued
    28   types can be queued. That was also true in "usb-ohci", except for interrupt
    30   to overhead in IRQ processing. When interrupt transfers are queued, those
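
A hedged sketch of queueing one such interrupt transfer through the URB API (the API calls are real; the device, endpoint number, and interval are made-up values):

    #include <linux/usb.h>

    static int example_submit_int(struct usb_device *udev, void *buf, int len,
                                  usb_complete_t done, void *ctx)
    {
            struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

            if (!urb)
                    return -ENOMEM;

            /* endpoint 1 IN with an 8 ms interval: illustrative values */
            usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 1),
                             buf, len, done, ctx, 8);

            return usb_submit_urb(urb, GFP_KERNEL);  /* queued to the HC */
    }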
|