
Searched full:queue (Results 1 – 25 of 3441) sorted by relevance

/linux/drivers/net/wireless/st/cw1200/
queue.c
3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
12 #include "queue.h"
27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument
29 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock()
30 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock()
31 pr_debug("[TX] Queue %d is locked.\n", in __cw1200_queue_lock()
32 queue->queue_id); in __cw1200_queue_lock()
33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock()
37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument
39 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock()
[all …]
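
The cw1200 excerpt above hinges on a lock-nesting count: only the 0 -> 1 transition of tx_locked_cnt actually stops the hardware queue, and only the matching final unlock restarts it. Below is a minimal userspace sketch of that pattern; the toy_* struct and the printouts are stand-ins for the driver's cw1200_queue and its ieee80211_stop_queue()/ieee80211_wake_queue() calls, not the real API.

#include <stdio.h>

struct toy_queue {
    int queue_id;
    int tx_locked_cnt;   /* nesting depth of lock requests */
};

static void toy_queue_lock(struct toy_queue *q)
{
    /* Only the first lock actually stops the queue. */
    if (q->tx_locked_cnt++ == 0)
        printf("[TX] Queue %d is locked.\n", q->queue_id);
}

static void toy_queue_unlock(struct toy_queue *q)
{
    /* Only the matching last unlock restarts it. */
    if (--q->tx_locked_cnt == 0)
        printf("[TX] Queue %d is unlocked.\n", q->queue_id);
}

int main(void)
{
    struct toy_queue q = { .queue_id = 2 };

    toy_queue_lock(&q);   /* stops the queue */
    toy_queue_lock(&q);   /* nested: no output */
    toy_queue_unlock(&q); /* still locked once */
    toy_queue_unlock(&q); /* restarts the queue */
    return 0;
}
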
/linux/drivers/nvme/target/
tcp.c
65 * queue before determining it to be idle. This optional module behavior
109 struct nvmet_tcp_queue *queue; member
218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument
221 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag()
226 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument
258 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
274 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
277 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
280 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument
285 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue * queue) nvmet_tcp_hdgst_len() argument
290 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue * queue) nvmet_tcp_ddgst_len() argument
300 nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue * queue,void * pdu,size_t len) nvmet_tcp_verify_hdgst() argument
326 nvmet_tcp_check_ddgst(struct nvmet_tcp_queue * queue,void * pdu) nvmet_tcp_check_ddgst() argument
382 nvmet_tcp_fatal_error(struct nvmet_tcp_queue * queue) nvmet_tcp_fatal_error() argument
391 nvmet_tcp_socket_error(struct nvmet_tcp_queue * queue,int status) nvmet_tcp_socket_error() argument
460 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_setup_c2h_data_pdu() local
533 nvmet_tcp_process_resp_list(struct nvmet_tcp_queue * queue) nvmet_tcp_process_resp_list() argument
545 nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue * queue) nvmet_tcp_fetch_cmd() argument
575 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_queue_response() local
641 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_try_send_data() local
753 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_try_send_ddgst() local
786 nvmet_tcp_try_send_one(struct nvmet_tcp_queue * queue,bool last_in_batch) nvmet_tcp_try_send_one() argument
835 nvmet_tcp_try_send(struct nvmet_tcp_queue * queue,int budget,int * sends) nvmet_tcp_try_send() argument
854 nvmet_prepare_receive_pdu(struct nvmet_tcp_queue * queue) nvmet_prepare_receive_pdu() argument
863 nvmet_tcp_handle_icreq(struct nvmet_tcp_queue * queue) nvmet_tcp_handle_icreq() argument
918 nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue * queue,struct nvmet_tcp_cmd * cmd,struct nvmet_req * req) nvmet_tcp_handle_req_failure() argument
949 nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_handle_h2c_data_pdu() argument
998 nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_done_recv_pdu() argument
1109 nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue * queue,struct msghdr * msg,char * cbuf) nvmet_tcp_tls_record_ok() argument
1144 nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_pdu() argument
1209 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_prep_recv_ddgst() local
1217 nvmet_tcp_try_recv_data(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_data() argument
1250 nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_ddgst() argument
1301 nvmet_tcp_try_recv_one(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_one() argument
1335 nvmet_tcp_try_recv(struct nvmet_tcp_queue * queue,int budget,int * recvs) nvmet_tcp_try_recv() argument
1356 struct nvmet_tcp_queue *queue = nvmet_tcp_release_queue() local
1363 nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue * queue) nvmet_tcp_schedule_release_queue() argument
1377 nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue * queue) nvmet_tcp_arm_queue_deadline() argument
1382 nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue * queue,int ops) nvmet_tcp_check_queue_deadline() argument
1396 struct nvmet_tcp_queue *queue = nvmet_tcp_io_work() local
1426 nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue * queue,struct nvmet_tcp_cmd * c) nvmet_tcp_alloc_cmd() argument
1482 nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_alloc_cmds() argument
1508 nvmet_tcp_free_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_free_cmds() argument
1520 nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue * queue) nvmet_tcp_restore_socket_callbacks() argument
1535 nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_uninit_data_in_cmds() argument
1551 nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue * queue) nvmet_tcp_free_cmd_data_in_buffers() argument
1563 struct nvmet_tcp_queue *queue = nvmet_tcp_release_queue_work() local
1592 struct nvmet_tcp_queue *queue; nvmet_tcp_data_ready() local
1610 struct nvmet_tcp_queue *queue; nvmet_tcp_write_space() local
1632 struct nvmet_tcp_queue *queue; nvmet_tcp_state_change() local
1657 nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue * queue) nvmet_tcp_set_queue_sock() argument
1713 nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_try_peek_pdu() argument
1761 nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue * queue,key_serial_t peerid) nvmet_tcp_tls_key_lookup() argument
1785 struct nvmet_tcp_queue *queue = data; nvmet_tcp_tls_handshake_done() local
1815 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), nvmet_tcp_tls_handshake_timeout() local
1835 nvmet_tcp_tls_handshake(struct nvmet_tcp_queue * queue) nvmet_tcp_tls_handshake() argument
1871 struct nvmet_tcp_queue *queue; nvmet_tcp_alloc_queue() local
2086 struct nvmet_tcp_queue *queue; nvmet_tcp_destroy_port_queues() local
2116 struct nvmet_tcp_queue *queue; nvmet_tcp_delete_ctrl() local
2127 struct nvmet_tcp_queue *queue = nvmet_tcp_install_queue() local
2162 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_disc_port_addr() local
2174 struct nvmet_tcp_queue *queue = nvmet_tcp_host_port_addr() local
2217 struct nvmet_tcp_queue *queue; nvmet_tcp_exit() local
[all...]
rdma.c
52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
147 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
157 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
[all …]
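
nvmet_rdma_get_rsp() in the excerpt pulls a free tag from a bitmap (sbitmap_get()) and uses it to index a preallocated array of responses; the surrounding lines suggest a fresh allocation as a fallback when no tag is free. A simplified, single-threaded sketch of the tag-to-slot idea, with a plain unsigned int standing in for the sbitmap and invented toy_* names:

#include <stdio.h>

#define NR_RSPS 16   /* illustrative pool size */

struct toy_rsp { int tag; int in_use; };

struct toy_rdma_queue {
    unsigned int bitmap;            /* one bit per preallocated rsp */
    struct toy_rsp rsps[NR_RSPS];
};

/* Grab a free tag and hand back the matching preallocated rsp, the way the
 * excerpt uses sbitmap_get() to index queue->rsps[]. */
static struct toy_rsp *toy_get_rsp(struct toy_rdma_queue *queue)
{
    for (int tag = 0; tag < NR_RSPS; tag++) {
        if (!(queue->bitmap & (1u << tag))) {
            queue->bitmap |= 1u << tag;
            queue->rsps[tag].tag = tag;
            queue->rsps[tag].in_use = 1;
            return &queue->rsps[tag];
        }
    }
    return NULL;   /* pool exhausted */
}

static void toy_put_rsp(struct toy_rdma_queue *queue, struct toy_rsp *rsp)
{
    rsp->in_use = 0;
    queue->bitmap &= ~(1u << rsp->tag);
}

int main(void)
{
    static struct toy_rdma_queue q;
    struct toy_rsp *rsp = toy_get_rsp(&q);

    printf("tag = %d\n", rsp ? rsp->tag : -1);
    if (rsp)
        toy_put_rsp(&q, rsp);
    return 0;
}
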
/linux/drivers/net/wireless/broadcom/b43legacy/
pio.c
22 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument
24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start()
28 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument
31 if (queue->need_workarounds) { in tx_octet()
32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
63 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument
71 if (queue->need_workarounds) { in tx_data()
[all …]
/linux/drivers/md/dm-vdo/
funnel-workqueue.c
15 #include "funnel-queue.h"
28 * DOC: Work queue definition.
36 /* Name of just the work queue (e.g., "cpuQ12") */
73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) in as_simple_work_queue() argument
75 return ((queue == NULL) ? in as_simple_work_queue()
76 NULL : container_of(queue, struct simple_work_queue, common)); in as_simple_work_queue()
79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) in as_round_robin_work_queue() argument
81 return ((queue == NULL) ? in as_round_robin_work_queue()
83 container_of(queue, struct round_robin_work_queue, common)); in as_round_robin_work_queue()
96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) in poll_for_completion() argument
[all …]
funnel-queue.c
6 #include "funnel-queue.h"
15 struct funnel_queue *queue; in vdo_make_funnel_queue() local
17 result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue); in vdo_make_funnel_queue()
22 * Initialize the stub entry and put it in the queue, establishing the invariant that in vdo_make_funnel_queue()
23 * queue->newest and queue->oldest are never null. in vdo_make_funnel_queue()
25 queue->stub.next = NULL; in vdo_make_funnel_queue()
26 queue->newest = &queue->stub; in vdo_make_funnel_queue()
27 queue->oldest = &queue->stub; in vdo_make_funnel_queue()
29 *queue_ptr = queue; in vdo_make_funnel_queue()
33 void vdo_free_funnel_queue(struct funnel_queue *queue) in vdo_free_funnel_queue() argument
[all …]
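
The funnel-queue excerpt spells out its core invariant: a stub entry is enqueued at creation time so that queue->newest and queue->oldest are never NULL. A stripped-down, single-threaded sketch of that construction (plain calloc in place of vdo_allocate, and none of the lock-free machinery):

#include <stdlib.h>

struct funnel_entry {
    struct funnel_entry *next;
};

struct toy_funnel_queue {
    struct funnel_entry *newest;   /* producer end */
    struct funnel_entry *oldest;   /* consumer end */
    struct funnel_entry stub;      /* placeholder so the ends are never NULL */
};

static int toy_make_funnel_queue(struct toy_funnel_queue **queue_ptr)
{
    struct toy_funnel_queue *queue = calloc(1, sizeof(*queue));

    if (!queue)
        return -1;

    /* Establish the invariant: both ends point at the stub entry. */
    queue->stub.next = NULL;
    queue->newest = &queue->stub;
    queue->oldest = &queue->stub;

    *queue_ptr = queue;
    return 0;
}

int main(void)
{
    struct toy_funnel_queue *q;

    if (toy_make_funnel_queue(&q))
        return 1;
    free(q);
    return 0;
}
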
/linux/drivers/scsi/arm/
queue.c
3 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
50 #include "queue.h"
55 * Function: void queue_initialise (Queue_t *queue)
56 * Purpose : initialise a queue
57 * Params : queue - queue to initialise
59 int queue_initialise (Queue_t *queue) in queue_initialise() argument
64 spin_lock_init(&queue->queue_lock); in queue_initialise()
65 INIT_LIST_HEAD(&queue->head); in queue_initialise()
66 INIT_LIST_HEAD(&queue->free); in queue_initialise()
74 queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL); in queue_initialise()
[all …]
queue.h
3 * linux/drivers/acorn/scsi/queue.h: queue handling
18 * Function: void queue_initialise (Queue_t *queue)
19 * Purpose : initialise a queue
20 * Params : queue - queue to initialise
22 extern int queue_initialise (Queue_t *queue);
25 * Function: void queue_free (Queue_t *queue)
26 * Purpose : free a queue
27 * Params : queue - queue to free
29 extern void queue_free (Queue_t *queue);
32 * Function: struct scsi_cmnd *queue_remove (queue)
[all …]
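
The acorn SCSI queue.c excerpt above initialises a spinlock, a head list, and a free list, then kmalloc's an array of entries in one shot; commands queued later are carried by entries taken from that preallocated free list, so enqueueing never allocates. A userspace sketch of that preallocated-free-list idea, using singly linked lists and invented toy_* names rather than the kernel's list_head API (ordering simplified to LIFO here; the real code uses doubly linked lists and preserves ordering):

#include <stdio.h>
#include <stdlib.h>

#define NQUEUES 8   /* illustrative pool size */

struct toy_entry {
    struct toy_entry *next;
    int data;
};

struct toy_queue {
    struct toy_entry *head;   /* entries holding queued data */
    struct toy_entry *free;   /* preallocated, currently unused entries */
    struct toy_entry *alloc;  /* backing array, released in one go */
};

/* Preallocate every entry up front and park it on the free list. */
static int toy_queue_initialise(struct toy_queue *q)
{
    q->head = q->free = NULL;
    q->alloc = calloc(NQUEUES, sizeof(*q->alloc));
    if (!q->alloc)
        return -1;
    for (int i = 0; i < NQUEUES; i++) {
        q->alloc[i].next = q->free;
        q->free = &q->alloc[i];
    }
    return 0;
}

static int toy_queue_add(struct toy_queue *q, int data)
{
    struct toy_entry *e = q->free;

    if (!e)
        return -1;          /* pool exhausted */
    q->free = e->next;
    e->data = data;
    e->next = q->head;
    q->head = e;
    return 0;
}

int main(void)
{
    struct toy_queue q;

    if (toy_queue_initialise(&q))
        return 1;
    toy_queue_add(&q, 42);
    printf("queued: %d\n", q.head->data);
    free(q.alloc);
    return 0;
}
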
/linux/drivers/nvme/host/
tcp.c
106 struct nvme_tcp_queue *queue; member
207 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
214 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
216 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
233 * Check if the queue is TLS encrypted
235 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
240 return queue->tls_enabled; in nvme_tcp_queue_tls()
254 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
256 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
259 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
[all …]
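
nvme_tcp_queue_id() above recovers a queue's index from pointer arithmetic alone: the queues live in one contiguous array hanging off the controller, so queue - queue->ctrl->queues is the index (which then selects the admin or I/O tag set). A self-contained sketch of that layout with invented toy_* types:

#include <stdio.h>

struct toy_queue {
    struct toy_ctrl *ctrl;   /* back-pointer to the owning controller */
    int busy;
};

struct toy_ctrl {
    struct toy_queue queues[8];   /* contiguous array: index == queue id */
};

/* Index is the distance from the start of the array, as in nvme_tcp_queue_id(). */
static int toy_queue_id(struct toy_queue *queue)
{
    return (int)(queue - queue->ctrl->queues);
}

int main(void)
{
    static struct toy_ctrl ctrl;

    for (int i = 0; i < 8; i++)
        ctrl.queues[i].ctrl = &ctrl;

    printf("id = %d\n", toy_queue_id(&ctrl.queues[5]));   /* prints 5 */
    return 0;
}
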
rdma.c
73 struct nvme_rdma_queue *queue; member
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
166 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
173 return queue in nvme_rdma_inline_data_size()
246 nvme_rdma_wait_for_cm(struct nvme_rdma_queue * queue) nvme_rdma_wait_for_cm() argument
257 nvme_rdma_create_qp(struct nvme_rdma_queue * queue,const int factor) nvme_rdma_create_qp() argument
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; nvme_rdma_init_request() local
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; nvme_rdma_init_hctx() local
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_init_admin_hctx() local
413 nvme_rdma_free_cq(struct nvme_rdma_queue * queue) nvme_rdma_free_cq() argument
421 nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_destroy_queue_ib() argument
463 nvme_rdma_create_cq(struct ib_device * ibdev,struct nvme_rdma_queue * queue) nvme_rdma_create_cq() argument
489 nvme_rdma_create_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_create_queue_ib() argument
572 struct nvme_rdma_queue *queue; nvme_rdma_alloc_queue() local
633 __nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) __nvme_rdma_stop_queue() argument
639 nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) nvme_rdma_stop_queue() argument
650 nvme_rdma_free_queue(struct nvme_rdma_queue * queue) nvme_rdma_free_queue() argument
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; nvme_rdma_start_queue() local
1170 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_wr_error() local
1198 nvme_rdma_inv_rkey(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req) nvme_rdma_inv_rkey() argument
1231 nvme_rdma_unmap_data(struct nvme_rdma_queue * queue,struct request * rq) nvme_rdma_unmap_data() argument
1264 nvme_rdma_map_sg_inline(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_inline() argument
1290 nvme_rdma_map_sg_single(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c) nvme_rdma_map_sg_single() argument
1302 nvme_rdma_map_sg_fr(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_fr() argument
1406 nvme_rdma_map_sg_pi(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count,int pi_count) nvme_rdma_map_sg_pi() argument
1525 nvme_rdma_map_data(struct nvme_rdma_queue * queue,struct request * rq,struct nvme_command * c) nvme_rdma_map_data() argument
1591 nvme_rdma_post_send(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe,struct ib_sge * sge,u32 num_sge,struct ib_send_wr * first) nvme_rdma_post_send() argument
1622 nvme_rdma_post_recv(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe) nvme_rdma_post_recv() argument
1648 nvme_rdma_tagset(struct nvme_rdma_queue * queue) nvme_rdma_tagset() argument
1666 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_submit_async_event() local
1690 nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue * queue,struct nvme_completion * cqe,struct ib_wc * wc) nvme_rdma_process_nvme_rsp() argument
1738 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_recv_done() local
1774 nvme_rdma_conn_established(struct nvme_rdma_queue * queue) nvme_rdma_conn_established() argument
1787 nvme_rdma_conn_rejected(struct nvme_rdma_queue * queue,struct rdma_cm_event * ev) nvme_rdma_conn_rejected() argument
1813 nvme_rdma_addr_resolved(struct nvme_rdma_queue * queue) nvme_rdma_addr_resolved() argument
1838 nvme_rdma_route_resolved(struct nvme_rdma_queue * queue) nvme_rdma_route_resolved() argument
1889 struct nvme_rdma_queue *queue = cm_id->context; nvme_rdma_cm_handler() local
1947 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_timed_out() local
1956 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_timeout() local
1996 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_queue_rq() local
2075 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_poll() local
2114 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_rq() local
[all...]
/linux/drivers/net/xen-netback/
rx.c
42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, in xenvif_update_needed_slots() argument
55 WRITE_ONCE(queue->rx_slots_needed, needed); in xenvif_update_needed_slots()
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
63 needed = READ_ONCE(queue->rx_slots_needed); in xenvif_rx_ring_slots_available()
68 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
69 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
74 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
80 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
90 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
[all …]
interface.c
44 /* Number of bytes allowed on the internal guest Rx queue. */
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
56 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
61 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
67 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete()
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt()
83 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt()
89 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local
[all …]
netback.c
59 /* The time that packets can stay on the guest Rx internal queue
107 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
110 static void make_tx_response(struct xenvif_queue *queue,
115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
117 static inline int tx_work_todo(struct xenvif_queue *queue);
119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
[all …]
/linux/drivers/iio/buffer/
industrialio-buffer-dma.c
37 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
38 * incoming queue are waiting for the DMA controller to pick them up and fill
39 * them with data. Block on the outgoing queue have been filled with data and
55 * incoming or outgoing queue the block will be freed.
101 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local
106 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
109 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release()
112 iio_buffer_put(&queue->buffer); in iio_buffer_block_release()
175 struct iio_dma_buffer_queue *queue, size_t size, bool fileio) in iio_dma_buffer_alloc_block() argument
184 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
[all …]
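
The industrialio-buffer-dma DOC comment describes each block cycling between an incoming queue (empty blocks waiting for the DMA controller to fill them) and an outgoing queue (filled blocks waiting to be read); a block on neither queue gets freed. A deliberately simplified sketch of that per-block lifecycle as a two-state machine, with no real DMA, lists, or reference counting:

#include <stdio.h>

#define NBLOCKS 4

/* A block is either waiting to be filled (incoming) or waiting to be read
 * (outgoing); a block on neither queue would be freed. */
enum block_state { BLOCK_INCOMING, BLOCK_OUTGOING };

struct toy_block {
    int id;
    enum block_state state;
};

/* "DMA complete": a filled incoming block moves to the outgoing queue. */
static void toy_block_done(struct toy_block *b)
{
    if (b->state == BLOCK_INCOMING)
        b->state = BLOCK_OUTGOING;
}

/* Userspace read: consume an outgoing block, then hand it back to the
 * incoming queue so the DMA controller can fill it again. */
static void toy_block_consume(struct toy_block *b)
{
    if (b->state == BLOCK_OUTGOING) {
        printf("read block %d\n", b->id);
        b->state = BLOCK_INCOMING;
    }
}

int main(void)
{
    struct toy_block blocks[NBLOCKS];

    for (int i = 0; i < NBLOCKS; i++)
        blocks[i] = (struct toy_block){ .id = i, .state = BLOCK_INCOMING };

    toy_block_done(&blocks[0]);
    toy_block_consume(&blocks[0]);
    return 0;
}
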
/linux/drivers/gpu/drm/imagination/
pvr_queue.c
86 WARN(1, "Invalid queue type"); in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work()
123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release()
133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
147 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name()
156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
170 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name()
262 * @queue: The queue this fence belongs to.
267 * pvr_queue_fence::queue field too.
271 struct pvr_queue *queue, in pvr_queue_fence_init() argument
[all …]
/linux/fs/fuse/
dev_uring.c
51 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument
53 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
56 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
60 * Allow one bg request per queue, ignoring global fc limits. in fuse_uring_flush_bg()
61 * This prevents a single queue from consuming all resources and in fuse_uring_flush_bg()
62 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg()
63 * limits are met but this queue has no more waiting requests. in fuse_uring_flush_bg()
66 !queue->active_background) && in fuse_uring_flush_bg()
67 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
70 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
[all …]
/linux/drivers/net/
xen-netfront.c
91 /* Queue name is interface name with "-qNNN" appended */
94 /* IRQ name is queue name with "-tx" or "-rx" appended */
108 unsigned int id; /* Queue ID, 0-based */
166 /* Multi-queue support */
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
232 queue in xennet_get_rx_ref()
248 struct netfront_queue *queue = timer_container_of(queue, t, rx_refill_timeout() local
253 netfront_tx_slot_available(struct netfront_queue * queue) netfront_tx_slot_available() argument
259 xennet_maybe_wake_tx(struct netfront_queue * queue) xennet_maybe_wake_tx() argument
271 xennet_alloc_one_rx_buffer(struct netfront_queue * queue) xennet_alloc_one_rx_buffer() argument
299 xennet_alloc_rx_buffers(struct netfront_queue * queue) xennet_alloc_rx_buffers() argument
366 struct netfront_queue *queue = NULL; xennet_open() local
390 xennet_tx_buf_gc(struct netfront_queue * queue) xennet_tx_buf_gc() argument
466 struct netfront_queue *queue; global() member
483 struct netfront_queue *queue = info->queue; xennet_tx_setup_grant() local
604 xennet_mark_tx_pending(struct netfront_queue * queue) xennet_mark_tx_pending() argument
614 xennet_xdp_xmit_one(struct net_device * dev,struct netfront_queue * queue,struct xdp_frame * xdpf) xennet_xdp_xmit_one() argument
649 struct netfront_queue *queue = NULL; xennet_xdp_xmit() local
718 struct netfront_queue *queue = NULL; xennet_start_xmit() local
868 struct netfront_queue *queue; xennet_close() local
885 struct netfront_queue *queue = &info->queues[i]; xennet_destroy_queues() local
902 xennet_set_rx_rsp_cons(struct netfront_queue * queue,RING_IDX val) xennet_set_rx_rsp_cons() argument
912 xennet_move_rx_slot(struct netfront_queue * queue,struct sk_buff * skb,grant_ref_t ref) xennet_move_rx_slot() argument
925 xennet_get_extras(struct netfront_queue * queue,struct xen_netif_extra_info * extras,RING_IDX rp) xennet_get_extras() argument
967 xennet_run_xdp(struct netfront_queue * queue,struct page * pdata,struct xen_netif_rx_response * rx,struct bpf_prog * prog,struct xdp_buff * xdp,bool * need_xdp_flush) xennet_run_xdp() argument
1021 xennet_get_responses(struct netfront_queue * queue,struct netfront_rx_info * rinfo,RING_IDX rp,struct sk_buff_head * list,bool * need_xdp_flush) xennet_get_responses() argument
1164 xennet_fill_frags(struct netfront_queue * queue,struct sk_buff * skb,struct sk_buff_head * list) xennet_fill_frags() argument
1227 handle_incoming_queue(struct netfront_queue * queue,struct sk_buff_head * rxq) handle_incoming_queue() argument
1265 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); xennet_poll() local
1423 xennet_release_tx_bufs(struct netfront_queue * queue) xennet_release_tx_bufs() argument
1445 xennet_release_rx_bufs(struct netfront_queue * queue) xennet_release_rx_bufs() argument
1514 xennet_handle_tx(struct netfront_queue * queue,unsigned int * eoi) xennet_handle_tx() argument
1539 xennet_handle_rx(struct netfront_queue * queue,unsigned int * eoi) xennet_handle_rx() argument
1823 struct netfront_queue *queue = &info->queues[i]; xennet_disconnect_backend() local
1905 setup_netfront_single(struct netfront_queue * queue) setup_netfront_single() argument
1931 setup_netfront_split(struct netfront_queue * queue) setup_netfront_split() argument
1976 setup_netfront(struct xenbus_device * dev,struct netfront_queue * queue,unsigned int feature_split_evtchn) setup_netfront() argument
2026 xennet_init_queue(struct netfront_queue * queue) xennet_init_queue() argument
2082 write_queue_xenstore_keys(struct netfront_queue * queue,struct xenbus_transaction * xbt,int write_hierarchical) write_queue_xenstore_keys() argument
2166 xennet_create_page_pool(struct netfront_queue * queue) xennet_create_page_pool() argument
2221 struct netfront_queue *queue = &info->queues[i]; xennet_create_queues() local
2266 struct netfront_queue *queue = NULL; talk_to_netback() local
2430 struct netfront_queue *queue = NULL; xennet_connect() local
[all...]
/linux/drivers/md/dm-vdo/indexer/
funnel-requestqueue.c
12 #include "funnel-queue.h"
18 * This queue will attempt to handle requests in reasonably sized batches instead of reacting
22 * If the wait time becomes long enough, the queue will become dormant and must be explicitly
24 * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
28 * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
31 * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
35 * the queue to awaken immediately.
50 /* Wait queue for synchronizing producers and consumer */
54 /* Queue of new incoming requests */
56 /* Queue of old requests to retry */
[all …]
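
The funnel-requestqueue comments describe a dormant-consumer protocol: producers publish onto the lock-free queue first (the xchg doubles as a memory barrier) and only then check "dormant" to decide whether a wake-up is needed, while the consumer marks itself dormant before sleeping and re-checks for work. A small pthread/C11-atomics sketch of that handshake, with a counter standing in for the funnel queue itself; it illustrates the ordering argument, not the indexer's code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static atomic_bool dormant = false;   /* consumer has gone to sleep */
static atomic_int pending = 0;        /* stands in for the funnel queue contents */

/* Producer: publish the work first (seq_cst ordering plays the role of the
 * xchg barrier), then wake the consumer only if it declared itself dormant. */
static void submit(void)
{
    atomic_fetch_add(&pending, 1);
    if (atomic_load(&dormant)) {
        pthread_mutex_lock(&lock);
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }
}

/* Consumer: mark itself dormant, then re-check for work before sleeping so a
 * submission racing with the flag update is never missed. */
static void wait_for_work(void)
{
    pthread_mutex_lock(&lock);
    atomic_store(&dormant, true);
    while (atomic_load(&pending) == 0)
        pthread_cond_wait(&cond, &lock);
    atomic_store(&dormant, false);
    pthread_mutex_unlock(&lock);
}

static void *consumer(void *arg)
{
    (void)arg;
    wait_for_work();
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, consumer, NULL);
    submit();
    pthread_join(&t, NULL);
    return 0;
}
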
/linux/drivers/net/wireless/ralink/rt2x00/
rt2x00queue.c
12 Abstract: rt2x00 queue specific routines.
25 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local
26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb()
37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb()
95 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb()
112 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb()
488 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data()
493 * a queue corruption! in rt2x00queue_write_tx_data()
498 "Corrupt queue %d, accessing entry which is not ours\n" in rt2x00queue_write_tx_data()
500 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data()
[all …]
rt2x00queue.h
10 Abstract: rt2x00 queue datastructures and routines
33 * enum data_queue_qid: Queue identification
35 * @QID_AC_VO: AC VO queue
36 * @QID_AC_VI: AC VI queue
37 * @QID_AC_BE: AC BE queue
38 * @QID_AC_BK: AC BK queue
39 * @QID_HCCA: HCCA queue
40 * @QID_MGMT: MGMT queue (prio queue)
41 * @QID_RX: RX queue
43 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
[all …]
/linux/drivers/net/wireguard/
queueing.c
25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, in wg_packet_queue_init() argument
30 memset(queue, 0, sizeof(*queue)); in wg_packet_queue_init()
31 queue->last_cpu = -1; in wg_packet_queue_init()
32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); in wg_packet_queue_init()
35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); in wg_packet_queue_init()
36 if (!queue->worker) { in wg_packet_queue_init()
37 ptr_ring_cleanup(&queue->ring, NULL); in wg_packet_queue_init()
43 void wg_packet_queue_free(struct crypt_queue *queue, bool purge) in wg_packet_queue_free() argument
45 free_percpu(queue->worker); in wg_packet_queue_free()
46 WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); in wg_packet_queue_free()
[all …]
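
wg_packet_queue_init() above shows the usual construct-and-unwind shape: zero the struct, initialise the ring, allocate the worker, and on failure tear the ring back down before returning an error. A userspace sketch of that error-unwinding pattern, with toy_* stand-ins for ptr_ring and the per-cpu worker allocation:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct toy_ring { void **slots; };
struct toy_worker { int cpu; };

struct toy_crypt_queue {
    struct toy_ring ring;
    struct toy_worker *worker;
    int last_cpu;
};

static int toy_ring_init(struct toy_ring *r, unsigned int len)
{
    r->slots = calloc(len, sizeof(*r->slots));
    return r->slots ? 0 : -ENOMEM;
}

static void toy_ring_cleanup(struct toy_ring *r)
{
    free(r->slots);
    r->slots = NULL;
}

/* Same shape as wg_packet_queue_init(): each later step undoes the earlier
 * ones on failure, so a partly constructed queue is never left behind. */
static int toy_packet_queue_init(struct toy_crypt_queue *queue, unsigned int len)
{
    int ret;

    memset(queue, 0, sizeof(*queue));
    queue->last_cpu = -1;

    ret = toy_ring_init(&queue->ring, len);
    if (ret)
        return ret;

    queue->worker = calloc(1, sizeof(*queue->worker));
    if (!queue->worker) {
        toy_ring_cleanup(&queue->ring);   /* unwind the ring on failure */
        return -ENOMEM;
    }
    return 0;
}

int main(void)
{
    struct toy_crypt_queue q;

    if (toy_packet_queue_init(&q, 16))
        return 1;
    free(q.worker);
    toy_ring_cleanup(&q.ring);
    return 0;
}
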
/linux/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
40 * WQE - Work Queue Entry
41 * SWQE - Send Work Queue Entry
42 * RWQE - Receive Work Queue Entry
43 * CQE - Completion Queue Entry
44 * EQE - Event Queue Entry
196 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument
200 if (q_offset >= queue->queue_length) in hw_qeit_calc()
201 q_offset -= queue->queue_length; in hw_qeit_calc()
202 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc()
206 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument
[all …]
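
hw_qeit_calc() in the ehea excerpt wraps a queue offset with a single conditional subtraction, which is enough because callers never advance the offset by more than one full queue length; the wrapped offset then selects the backing page. A flat-buffer sketch of that wrap logic (one array instead of a page table; sizes invented):

#include <stdio.h>

#define QUEUE_LENGTH 64   /* illustrative size, not the ehea geometry */

struct toy_hw_queue {
    unsigned char ring[QUEUE_LENGTH];
    unsigned long current_offset;
};

/* Resolve an offset into the ring. Because callers never advance by more than
 * one full queue length, a single conditional subtraction suffices to wrap,
 * the same trick hw_qeit_calc() uses before picking the backing page. */
static unsigned char *toy_qeit_calc(struct toy_hw_queue *queue, unsigned long q_offset)
{
    if (q_offset >= QUEUE_LENGTH)
        q_offset -= QUEUE_LENGTH;
    return &queue->ring[q_offset];
}

int main(void)
{
    static struct toy_hw_queue q;

    /* Offset 70 wraps to slot 6. */
    printf("slot = %ld\n", (long)(toy_qeit_calc(&q, 70) - q.ring));
    return 0;
}
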
/linux/net/sunrpc/
H A Dsched.c91 * queue->lock and bh_disabled in order to avoid races within
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
101 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
102 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer()
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
109 queue->timer_list.expires = expires; in rpc_set_queue_timer()
114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer()
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) in __rpc_add_timer()
126 rpc_set_queue_timer(queue, timeout); in __rpc_add_timer()
[all …]
/linux/drivers/net/wireless/ath/ath5k/
qcu.c
20 Queue Control Unit, DCF Control Unit Functions
31 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
35 * basically we have 10 queues to play with. Each queue has a matching
36 * QCU that controls when the queue will get triggered and multiple QCUs
39 * and DCUs allowing us to have different DFS settings for each queue.
41 * When a frame goes into a TX queue, QCU decides when it'll trigger a
43 * it's buffer or -if it's a beacon queue- if it's time to fire up the queue
58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
60 * @queue: One of enum ath5k_tx_queue_id
63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument
[all …]
/linux/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_userq.c125 * Iterate through all queue types to detect and reset problematic queues in amdgpu_userq_unmap_helper()
126 * Process each queue type in the defined order in amdgpu_userq_unmap_helper()
151 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue, in amdgpu_userq_map_helper()
164 list_add(&va_cursor->list, &queue->userq_va_list); in amdgpu_userq_wait_for_last_fence() argument
169 int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, in amdgpu_userq_wait_for_last_fence()
173 struct amdgpu_vm *vm = queue->vm; in amdgpu_userq_wait_for_last_fence()
193 amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr); in amdgpu_userq_find()
222 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue) in amdgpu_userq_create_object()
227 list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { in amdgpu_userq_create_object()
228 r += amdgpu_userq_buffer_va_mapped(queue in amdgpu_userq_create_object()
82 amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_preempt_helper() argument
103 amdgpu_userq_restore_helper(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_restore_helper() argument
124 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_unmap_helper() argument
144 amdgpu_userq_map_helper(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_map_helper() argument
179 amdgpu_userq_cleanup(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue,int queue_id) amdgpu_userq_cleanup() argument
362 struct amdgpu_usermode_queue *queue; amdgpu_userq_destroy() local
418 struct amdgpu_usermode_queue *queue = m->private; amdgpu_mqd_info_read() local
462 struct amdgpu_usermode_queue *queue; amdgpu_userq_create() local
695 struct amdgpu_usermode_queue *queue; amdgpu_userq_restore_all() local
850 struct amdgpu_usermode_queue *queue; amdgpu_userq_evict_all() local
869 struct amdgpu_usermode_queue *queue; amdgpu_userq_wait_for_signal() local
940 struct amdgpu_usermode_queue *queue; amdgpu_userq_mgr_fini() local
969 struct amdgpu_usermode_queue *queue; amdgpu_userq_suspend() local
998 struct amdgpu_usermode_queue *queue; amdgpu_userq_resume() local
1027 struct amdgpu_usermode_queue *queue; amdgpu_userq_stop_sched_for_enforce_isolation() local
1062 struct amdgpu_usermode_queue *queue; amdgpu_userq_start_sched_for_enforce_isolation() local
[all...]
