
Full-text search for "queue" (results 1 – 25 of 3548), sorted by relevance.


/linux/drivers/net/wireless/st/cw1200/
queue.c
3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
12 #include "queue.h"
27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument
29 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock()
30 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock()
31 pr_debug("[TX] Queue %d is locked.\n", in __cw1200_queue_lock()
32 queue->queue_id); in __cw1200_queue_lock()
33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock()
37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument
39 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock()
[all …]
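
The cw1200 snippet above guards its TX queue with a nesting counter: the mac80211 queue is stopped only on the 0 -> 1 transition of tx_locked_cnt and, in the matching unlock path, woken again only when the counter drops back to zero. Below is a minimal userspace sketch of that counted-lock idiom, using hypothetical names (struct tx_queue, queue_lock, queue_unlock) rather than the driver's real structures.

#include <stdio.h>

/* Hypothetical stand-in for struct cw1200_queue: only the lock counter matters here. */
struct tx_queue {
    int id;
    unsigned int tx_locked_cnt;
};

/* Stop the hardware queue only when the first lock request arrives. */
static void queue_lock(struct tx_queue *q)
{
    if (q->tx_locked_cnt++ == 0)
        printf("[TX] Queue %d is locked.\n", q->id);   /* ieee80211_stop_queue() in the driver */
}

/* Wake it again only when the last holder releases. */
static void queue_unlock(struct tx_queue *q)
{
    if (--q->tx_locked_cnt == 0)
        printf("[TX] Queue %d is unlocked.\n", q->id); /* presumably ieee80211_wake_queue() in the driver */
}

int main(void)
{
    struct tx_queue q = { .id = 0 };

    queue_lock(&q);   /* prints "locked"   */
    queue_lock(&q);   /* nested: no output */
    queue_unlock(&q); /* still locked      */
    queue_unlock(&q); /* prints "unlocked" */
    return 0;
}
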
/linux/drivers/usb/gadget/function/
uvc_queue.c
27 * Video buffers queue management.
33 * the videobuf2 queue operations by serializing calls to videobuf2 and a
34 * spinlock to protect the IRQ queue that holds the buffers to be processed by
39 * videobuf2 queue operations
46 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
47 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup()
63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
64 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_buffer_prepare()
74 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare()
78 if (queue->use_sg) { in uvc_buffer_prepare()
[all …]
/linux/drivers/net/wireless/broadcom/b43legacy/
pio.c
22 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument
24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start()
28 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument
31 if (queue->need_workarounds) { in tx_octet()
32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
63 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument
71 if (queue->need_workarounds) { in tx_data()
[all …]
/linux/drivers/scsi/arm/
queue.c
3 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
50 #include "queue.h"
55 * Function: void queue_initialise (Queue_t *queue)
56 * Purpose : initialise a queue
57 * Params : queue - queue to initialise
59 int queue_initialise (Queue_t *queue) in queue_initialise() argument
64 spin_lock_init(&queue->queue_lock); in queue_initialise()
65 INIT_LIST_HEAD(&queue->head); in queue_initialise()
66 INIT_LIST_HEAD(&queue->free); in queue_initialise()
74 queue->alloc = q = kmalloc_objs(QE_t, nqueues); in queue_initialise()
[all …]
queue.h
3 * linux/drivers/acorn/scsi/queue.h: queue handling
18 * Function: void queue_initialise (Queue_t *queue)
19 * Purpose : initialise a queue
20 * Params : queue - queue to initialise
22 extern int queue_initialise (Queue_t *queue);
25 * Function: void queue_free (Queue_t *queue)
26 * Purpose : free a queue
27 * Params : queue - queue to free
29 extern void queue_free (Queue_t *queue);
32 * Function: struct scsi_cmnd *queue_remove (queue)
[all …]
/linux/drivers/nvme/host/
tcp.c
107 struct nvme_tcp_queue *queue; member
208 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
215 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
217 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
234 * Check if the queue is TLS encrypted
236 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
241 return queue->tls_enabled; in nvme_tcp_queue_tls()
255 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
257 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
264 nvme_tcp_hdgst_len(struct nvme_tcp_queue * queue) nvme_tcp_hdgst_len() argument
269 nvme_tcp_ddgst_len(struct nvme_tcp_queue * queue) nvme_tcp_ddgst_len() argument
384 nvme_tcp_send_all(struct nvme_tcp_queue * queue) nvme_tcp_send_all() argument
394 nvme_tcp_queue_has_pending(struct nvme_tcp_queue * queue) nvme_tcp_queue_has_pending() argument
400 nvme_tcp_queue_more(struct nvme_tcp_queue * queue) nvme_tcp_queue_more() argument
409 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_queue_request() local
430 nvme_tcp_process_req_list(struct nvme_tcp_queue * queue) nvme_tcp_process_req_list() argument
442 nvme_tcp_fetch_request(struct nvme_tcp_queue * queue) nvme_tcp_fetch_request() argument
495 nvme_tcp_verify_hdgst(struct nvme_tcp_queue * queue,void * pdu,size_t pdu_len) nvme_tcp_verify_hdgst() argument
521 nvme_tcp_check_ddgst(struct nvme_tcp_queue * queue,void * pdu) nvme_tcp_check_ddgst() argument
557 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; nvme_tcp_init_request() local
580 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; nvme_tcp_init_hctx() local
590 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_init_admin_hctx() local
597 nvme_tcp_recv_state(struct nvme_tcp_queue * queue) nvme_tcp_recv_state() argument
604 nvme_tcp_init_recv_ctx(struct nvme_tcp_queue * queue) nvme_tcp_init_recv_ctx() argument
622 nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue * queue,struct nvme_completion * cqe) nvme_tcp_process_nvme_cqe() argument
648 nvme_tcp_handle_c2h_data(struct nvme_tcp_queue * queue,struct nvme_tcp_data_pdu * pdu) nvme_tcp_handle_c2h_data() argument
682 nvme_tcp_handle_comp(struct nvme_tcp_queue * queue,struct nvme_tcp_rsp_pdu * pdu) nvme_tcp_handle_comp() argument
707 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_setup_h2c_data_pdu() local
738 nvme_tcp_handle_r2t(struct nvme_tcp_queue * queue,struct nvme_tcp_r2t_pdu * pdu) nvme_tcp_handle_r2t() argument
797 nvme_tcp_handle_c2h_term(struct nvme_tcp_queue * queue,struct nvme_tcp_term_pdu * pdu) nvme_tcp_handle_c2h_term() argument
831 nvme_tcp_recv_pdu(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_pdu() argument
911 nvme_tcp_recv_data(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_data() argument
982 nvme_tcp_recv_ddgst(struct nvme_tcp_queue * queue,struct sk_buff * skb,unsigned int * offset,size_t * len) nvme_tcp_recv_ddgst() argument
1030 struct nvme_tcp_queue *queue = desc->arg.data; nvme_tcp_recv_skb() local
1065 struct nvme_tcp_queue *queue; nvme_tcp_data_ready() local
1079 struct nvme_tcp_queue *queue; nvme_tcp_write_space() local
1095 struct nvme_tcp_queue *queue; nvme_tcp_state_change() local
1121 nvme_tcp_done_send_req(struct nvme_tcp_queue * queue) nvme_tcp_done_send_req() argument
1141 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_data() local
1204 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_cmd_pdu() local
1245 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_data_pdu() local
1279 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_try_send_ddgst() local
1310 nvme_tcp_try_send(struct nvme_tcp_queue * queue) nvme_tcp_try_send() argument
1360 nvme_tcp_try_recv(struct nvme_tcp_queue * queue) nvme_tcp_try_recv() argument
1378 struct nvme_tcp_queue *queue = nvme_tcp_io_work() local
1423 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_alloc_async_req() local
1440 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_free_queue() local
1459 nvme_tcp_init_connection(struct nvme_tcp_queue * queue) nvme_tcp_init_connection() argument
1590 nvme_tcp_admin_queue(struct nvme_tcp_queue * queue) nvme_tcp_admin_queue() argument
1595 nvme_tcp_default_queue(struct nvme_tcp_queue * queue) nvme_tcp_default_queue() argument
1604 nvme_tcp_read_queue(struct nvme_tcp_queue * queue) nvme_tcp_read_queue() argument
1615 nvme_tcp_poll_queue(struct nvme_tcp_queue * queue) nvme_tcp_poll_queue() argument
1637 nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue * queue) nvme_tcp_set_queue_io_cpu() argument
1682 struct nvme_tcp_queue *queue = data; nvme_tcp_tls_done() local
1713 nvme_tcp_start_tls(struct nvme_ctrl * nctrl,struct nvme_tcp_queue * queue,key_serial_t pskid) nvme_tcp_start_tls() argument
1769 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_alloc_queue() local
1913 nvme_tcp_restore_sock_ops(struct nvme_tcp_queue * queue) nvme_tcp_restore_sock_ops() argument
1925 __nvme_tcp_stop_queue(struct nvme_tcp_queue * queue) __nvme_tcp_stop_queue() argument
1935 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_stop_queue_nowait() local
1954 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; nvme_tcp_wait_queue() local
1976 nvme_tcp_setup_sock_ops(struct nvme_tcp_queue * queue) nvme_tcp_setup_sock_ops() argument
1995 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; nvme_tcp_start_queue() local
2580 nvme_tcp_set_sg_inline(struct nvme_tcp_queue * queue,struct nvme_command * c,u32 data_len) nvme_tcp_set_sg_inline() argument
2604 struct nvme_tcp_queue *queue = &ctrl->queues[0]; nvme_tcp_submit_async_event() local
2679 nvme_tcp_map_data(struct nvme_tcp_queue * queue,struct request * rq) nvme_tcp_map_data() argument
2704 struct nvme_tcp_queue *queue = req->queue; nvme_tcp_setup_cmd_pdu() local
2755 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_commit_rqs() local
2765 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_queue_rq() local
2794 struct nvme_tcp_queue *queue = hctx->driver_data; nvme_tcp_poll() local
2811 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; nvme_tcp_get_address() local
[all …]
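
nvme_tcp_queue_id() in the listing above recovers a queue's index purely by pointer arithmetic: the controller owns a contiguous array of queues, so "queue - queue->ctrl->queues" is the index. A small self-contained sketch of that idiom follows; the struct names ctrl and queue here are hypothetical simplifications, not the nvme structures.

#include <stdio.h>

#define NR_QUEUES 4

/* Hypothetical miniature of the layout: a controller owns an array of queues,
 * and each queue points back at its controller. */
struct ctrl;

struct queue {
    struct ctrl *ctrl;
};

struct ctrl {
    struct queue queues[NR_QUEUES];
};

/* Same idiom as nvme_tcp_queue_id(): the index is the pointer difference
 * between the queue and the start of the controller's queue array. */
static int queue_id(struct queue *q)
{
    return (int)(q - q->ctrl->queues);
}

int main(void)
{
    struct ctrl c;

    for (int i = 0; i < NR_QUEUES; i++)
        c.queues[i].ctrl = &c;

    printf("id of third queue: %d\n", queue_id(&c.queues[2])); /* prints 2 */
    return 0;
}
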
rdma.c
73 struct nvme_rdma_queue *queue; member
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
166 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
173 return queue in nvme_rdma_inline_data_size()
246 nvme_rdma_wait_for_cm(struct nvme_rdma_queue * queue) nvme_rdma_wait_for_cm() argument
257 nvme_rdma_create_qp(struct nvme_rdma_queue * queue,const int factor) nvme_rdma_create_qp() argument
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; nvme_rdma_init_request() local
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; nvme_rdma_init_hctx() local
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_init_admin_hctx() local
413 nvme_rdma_free_cq(struct nvme_rdma_queue * queue) nvme_rdma_free_cq() argument
421 nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_destroy_queue_ib() argument
463 nvme_rdma_create_cq(struct ib_device * ibdev,struct nvme_rdma_queue * queue) nvme_rdma_create_cq() argument
489 nvme_rdma_create_queue_ib(struct nvme_rdma_queue * queue) nvme_rdma_create_queue_ib() argument
572 struct nvme_rdma_queue *queue; nvme_rdma_alloc_queue() local
633 __nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) __nvme_rdma_stop_queue() argument
639 nvme_rdma_stop_queue(struct nvme_rdma_queue * queue) nvme_rdma_stop_queue() argument
650 nvme_rdma_free_queue(struct nvme_rdma_queue * queue) nvme_rdma_free_queue() argument
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; nvme_rdma_start_queue() local
1170 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_wr_error() local
1198 nvme_rdma_inv_rkey(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req) nvme_rdma_inv_rkey() argument
1231 nvme_rdma_unmap_data(struct nvme_rdma_queue * queue,struct request * rq) nvme_rdma_unmap_data() argument
1264 nvme_rdma_map_sg_inline(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_inline() argument
1290 nvme_rdma_map_sg_single(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c) nvme_rdma_map_sg_single() argument
1302 nvme_rdma_map_sg_fr(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count) nvme_rdma_map_sg_fr() argument
1406 nvme_rdma_map_sg_pi(struct nvme_rdma_queue * queue,struct nvme_rdma_request * req,struct nvme_command * c,int count,int pi_count) nvme_rdma_map_sg_pi() argument
1525 nvme_rdma_map_data(struct nvme_rdma_queue * queue,struct request * rq,struct nvme_command * c) nvme_rdma_map_data() argument
1591 nvme_rdma_post_send(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe,struct ib_sge * sge,u32 num_sge,struct ib_send_wr * first) nvme_rdma_post_send() argument
1622 nvme_rdma_post_recv(struct nvme_rdma_queue * queue,struct nvme_rdma_qe * qe) nvme_rdma_post_recv() argument
1648 nvme_rdma_tagset(struct nvme_rdma_queue * queue) nvme_rdma_tagset() argument
1666 struct nvme_rdma_queue *queue = &ctrl->queues[0]; nvme_rdma_submit_async_event() local
1690 nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue * queue,struct nvme_completion * cqe,struct ib_wc * wc) nvme_rdma_process_nvme_rsp() argument
1738 struct nvme_rdma_queue *queue = wc->qp->qp_context; nvme_rdma_recv_done() local
1774 nvme_rdma_conn_established(struct nvme_rdma_queue * queue) nvme_rdma_conn_established() argument
1787 nvme_rdma_conn_rejected(struct nvme_rdma_queue * queue,struct rdma_cm_event * ev) nvme_rdma_conn_rejected() argument
1813 nvme_rdma_addr_resolved(struct nvme_rdma_queue * queue) nvme_rdma_addr_resolved() argument
1838 nvme_rdma_route_resolved(struct nvme_rdma_queue * queue) nvme_rdma_route_resolved() argument
1889 struct nvme_rdma_queue *queue = cm_id->context; nvme_rdma_cm_handler() local
1947 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_timed_out() local
1956 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_timeout() local
1996 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_queue_rq() local
2075 struct nvme_rdma_queue *queue = hctx->driver_data; nvme_rdma_poll() local
2114 struct nvme_rdma_queue *queue = req->queue; nvme_rdma_complete_rq() local
[all …]
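
nvme_rdma_poll_queue() above classifies a queue by where its index falls: I/O queues are laid out as contiguous ranges (default, then read, then poll), so an index beyond the default and read counts belongs to a polling queue. A compact sketch of that range check, with made-up counts and simplified, hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the per-hctx-type queue counts. */
enum { HCTX_DEFAULT, HCTX_READ, HCTX_POLL, HCTX_NR };

struct ctrl {
    int io_queues[HCTX_NR];   /* how many queues each range holds */
};

/* A queue polls when its index lies past the default and read ranges. */
static bool is_poll_queue(const struct ctrl *c, int queue_idx)
{
    return queue_idx > c->io_queues[HCTX_DEFAULT] + c->io_queues[HCTX_READ];
}

int main(void)
{
    struct ctrl c = { .io_queues = { 4, 2, 2 } };

    /* Queue 0 is the admin queue; I/O queues 1..4 default, 5..6 read, 7..8 poll. */
    printf("queue 6 polls? %d\n", is_poll_queue(&c, 6)); /* 0 */
    printf("queue 7 polls? %d\n", is_poll_queue(&c, 7)); /* 1 */
    return 0;
}
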
/linux/drivers/net/xen-netback/
rx.c
42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, in xenvif_update_needed_slots() argument
55 WRITE_ONCE(queue->rx_slots_needed, needed); in xenvif_update_needed_slots()
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
63 needed = READ_ONCE(queue->rx_slots_needed); in xenvif_rx_ring_slots_available()
68 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
69 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
74 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
80 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
90 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
[all …]
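
xenvif_rx_ring_slots_available() above compares the frontend's request producer against the backend's consumer to decide whether enough ring slots are free, re-arming req_event and re-checking to avoid missed wakeups. The core arithmetic is just an unsigned producer/consumer difference; here is a minimal sketch of that part only (field names kept close to the snippet, everything else hypothetical, without the event and re-check logic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature of the shared-ring bookkeeping the snippet reads:
 * the guest advances req_prod, the backend advances req_cons. */
struct rx_ring {
    uint32_t req_prod;  /* written by the guest (producer)   */
    uint32_t req_cons;  /* written by the backend (consumer) */
};

/* Unsigned subtraction tolerates index wrap-around: the number of
 * unconsumed requests is prod - cons. */
static bool rx_slots_available(const struct rx_ring *r, uint32_t needed)
{
    return r->req_prod - r->req_cons >= needed;
}

int main(void)
{
    struct rx_ring r = { .req_prod = 10, .req_cons = 7 };

    printf("3 slots needed: %s\n", rx_slots_available(&r, 3) ? "ok" : "wait");
    printf("4 slots needed: %s\n", rx_slots_available(&r, 4) ? "ok" : "wait");
    return 0;
}
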
interface.c
44 /* Number of bytes allowed on the internal guest Rx queue. */
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
56 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
61 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
67 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete()
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt()
83 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt()
89 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local
[all …]
netback.c
59 /* The time that packets can stay on the guest Rx internal queue
107 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
110 static void make_tx_response(struct xenvif_queue *queue,
115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
117 static inline int tx_work_todo(struct xenvif_queue *queue);
119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
[all …]
/linux/drivers/gpu/drm/imagination/
pvr_queue.c
86 WARN(1, "Invalid queue type"); in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work()
123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release()
133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
147 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name()
156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
170 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name()
262 * @queue: The queue this fence belongs to.
267 * pvr_queue_fence::queue field too.
271 struct pvr_queue *queue, in pvr_queue_fence_init() argument
[all …]
/linux/drivers/net/
xen-netfront.c
91 /* Queue name is interface name with "-qNNN" appended */
94 /* IRQ name is queue name with "-tx" or "-rx" appended */
108 unsigned int id; /* Queue ID, 0-based */
166 /* Multi-queue support */
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_get_rx_ref()
[all …]
/linux/drivers/iio/buffer/
industrialio-buffer-dma.c
38 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
39 * incoming queue are waiting for the DMA controller to pick them up and fill
40 * them with data. Block on the outgoing queue have been filled with data and
56 * incoming or outgoing queue the block will be freed.
102 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local
107 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
110 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release()
113 iio_buffer_put(&queue->buffer); in iio_buffer_block_release()
173 iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue *queue, size_t size, in iio_dma_buffer_alloc_block() argument
182 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
[all …]
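
The industrialio-buffer-dma.c comment above describes a block lifecycle built on two queues: blocks sit on an incoming queue while they wait for the DMA controller to fill them, move to an outgoing queue once they hold data, and are freed when they are on neither. A toy userspace model of that flow, using plain FIFO lists and hypothetical names, with no DMA or locking:

#include <stdio.h>

/* Hypothetical miniature of a DMA block: just an id and a list link. */
struct block {
    int id;
    struct block *next;
};

struct fifo {
    struct block *head, *tail;
};

static void enqueue(struct fifo *q, struct block *b)
{
    b->next = NULL;
    if (q->tail)
        q->tail->next = b;
    else
        q->head = b;
    q->tail = b;
}

static struct block *dequeue(struct fifo *q)
{
    struct block *b = q->head;

    if (b) {
        q->head = b->next;
        if (!q->head)
            q->tail = NULL;
    }
    return b;
}

int main(void)
{
    struct fifo incoming = { 0 }, outgoing = { 0 };
    struct block a = { .id = 1 }, b = { .id = 2 };

    enqueue(&incoming, &a);                 /* submitted, waiting for DMA      */
    enqueue(&incoming, &b);
    enqueue(&outgoing, dequeue(&incoming)); /* "DMA" filled the oldest block   */

    struct block *done = dequeue(&outgoing); /* userspace reads the filled one */
    printf("filled block: %d\n", done ? done->id : -1);
    return 0;
}
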
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_userq.c
125 * Iterate through all queue types to detect and reset problematic queues in amdgpu_userq_detect_and_reset_queues()
126 * Process each queue type in the defined order in amdgpu_userq_detect_and_reset_queues()
153 struct amdgpu_usermode_queue *queue = container_of(work, in amdgpu_userq_hang_detect_work() local
159 if (!queue->userq_mgr) in amdgpu_userq_hang_detect_work()
162 uq_mgr = queue->userq_mgr; in amdgpu_userq_hang_detect_work()
163 fence = READ_ONCE(queue->hang_detect_fence); in amdgpu_userq_hang_detect_work()
174 * Start hang detection for a user queue fence. A delayed work will be scheduled
177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue) in amdgpu_userq_start_hang_detect_work() argument
182 if (!queue || !queue in amdgpu_userq_start_hang_detect_work()
208 amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue * queue) amdgpu_userq_init_hang_detect_work() argument
214 amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue * queue,struct amdgpu_bo_va_mapping * va_map,u64 addr) amdgpu_userq_buffer_va_list_add() argument
233 amdgpu_userq_input_va_validate(struct amdgpu_device * adev,struct amdgpu_usermode_queue * queue,u64 addr,u64 expected_size) amdgpu_userq_input_va_validate() argument
281 amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue * queue) amdgpu_userq_buffer_vas_mapped() argument
308 amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device * adev,struct amdgpu_usermode_queue * queue) amdgpu_userq_buffer_vas_list_cleanup() argument
329 amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_preempt_helper() argument
354 amdgpu_userq_restore_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_restore_helper() argument
374 amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_unmap_helper() argument
400 amdgpu_userq_map_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_map_helper() argument
421 amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue * queue) amdgpu_userq_wait_for_last_fence() argument
431 amdgpu_userq_cleanup(struct amdgpu_usermode_queue * queue) amdgpu_userq_cleanup() argument
616 amdgpu_userq_destroy(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_destroy() argument
675 struct amdgpu_usermode_queue *queue = amdgpu_userq_kref_destroy() local
686 struct amdgpu_usermode_queue *queue; amdgpu_userq_get() local
697 amdgpu_userq_put(struct amdgpu_usermode_queue * queue) amdgpu_userq_put() argument
725 struct amdgpu_usermode_queue *queue; amdgpu_userq_create() local
964 struct amdgpu_usermode_queue *queue; amdgpu_userq_ioctl() local
1004 struct amdgpu_usermode_queue *queue; amdgpu_userq_restore_all() local
1253 struct amdgpu_usermode_queue *queue; amdgpu_userq_evict_all() local
1291 struct amdgpu_usermode_queue *queue; amdgpu_userq_wait_for_signal() local
1331 struct amdgpu_usermode_queue *queue; amdgpu_userq_mgr_fini() local
1355 struct amdgpu_usermode_queue *queue; amdgpu_userq_suspend() local
1381 struct amdgpu_usermode_queue *queue; amdgpu_userq_resume() local
1407 struct amdgpu_usermode_queue *queue; amdgpu_userq_stop_sched_for_enforce_isolation() local
1441 struct amdgpu_usermode_queue *queue; amdgpu_userq_start_sched_for_enforce_isolation() local
1498 struct amdgpu_usermode_queue *queue; amdgpu_userq_pre_reset() local
1525 struct amdgpu_usermode_queue *queue; amdgpu_userq_post_reset() local
[all …]
/linux/drivers/misc/genwqe/
card_ddcb.c
14 * Device Driver Control Block (DDCB) queue support. Definition of
15 * interrupt handlers for queue support as well as triggering the
40 * Situation (1): Empty queue
56 * Situation (3): Queue wrapped, A > N
64 * Situation (4a): Queue full N > A
73 Situation (4b): Queue full A > N
82 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument
84 return queue->ddcb_next == queue->ddcb_act; in queue_empty()
87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument
89 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs()
[all …]
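
The card_ddcb.c comments above enumerate the DDCB ring states in terms of the active index A and the next-free index N (empty when N == A, wrapped when N has gone back around, full when they meet again). Below is a small sketch of the corresponding bookkeeping, mirroring queue_empty() and queue_enqueued_ddcbs() from the listing; the field values are hypothetical.

#include <stdio.h>

/* Hypothetical miniature of the DDCB ring: ddcb_act is the oldest still-active
 * entry (A), ddcb_next is where the next request goes (N), ddcb_max is the
 * ring size. */
struct ddcb_queue {
    int ddcb_act;
    int ddcb_next;
    int ddcb_max;
};

/* Situation (1): the ring is empty when N == A. */
static int queue_empty(const struct ddcb_queue *q)
{
    return q->ddcb_next == q->ddcb_act;
}

/* Number of enqueued entries, accounting for wrap-around (N may be behind A). */
static int queue_enqueued(const struct ddcb_queue *q)
{
    if (q->ddcb_next >= q->ddcb_act)
        return q->ddcb_next - q->ddcb_act;
    return q->ddcb_max - (q->ddcb_act - q->ddcb_next);
}

int main(void)
{
    struct ddcb_queue q = { .ddcb_act = 6, .ddcb_next = 2, .ddcb_max = 8 };

    printf("empty: %d, enqueued: %d\n", queue_empty(&q), queue_enqueued(&q)); /* 0, 4 */
    return 0;
}
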
/linux/drivers/net/wireless/ralink/rt2x00/
rt2x00queue.c
12 Abstract: rt2x00 queue specific routines.
25 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local
26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb()
37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb()
95 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb()
112 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb()
488 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data()
493 * a queue corruption! in rt2x00queue_write_tx_data()
498 "Corrupt queue %d, accessing entry which is not ours\n" in rt2x00queue_write_tx_data()
500 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data()
[all …]
rt2x00queue.h
10 Abstract: rt2x00 queue datastructures and routines
33 * enum data_queue_qid: Queue identification
35 * @QID_AC_VO: AC VO queue
36 * @QID_AC_VI: AC VI queue
37 * @QID_AC_BE: AC BE queue
38 * @QID_AC_BK: AC BK queue
39 * @QID_HCCA: HCCA queue
40 * @QID_MGMT: MGMT queue (prio queue)
41 * @QID_RX: RX queue
43 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
[all …]
/linux/drivers/net/wireguard/
queueing.c
25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, in wg_packet_queue_init() argument
30 memset(queue, 0, sizeof(*queue)); in wg_packet_queue_init()
31 queue->last_cpu = -1; in wg_packet_queue_init()
32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); in wg_packet_queue_init()
35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); in wg_packet_queue_init()
36 if (!queue->worker) { in wg_packet_queue_init()
37 ptr_ring_cleanup(&queue->ring, NULL); in wg_packet_queue_init()
43 void wg_packet_queue_free(struct crypt_queue *queue, bool purge) in wg_packet_queue_free() argument
45 free_percpu(queue->worker); in wg_packet_queue_free()
46 WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); in wg_packet_queue_free()
[all …]
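
wg_packet_queue_init() above zeroes the queue, allocates the ptr_ring, then the per-CPU workers, and unwinds the ring allocation if the worker allocation fails; wg_packet_queue_free() releases both in reverse. A stripped-down sketch of that init/unwind/free pairing, using plain malloc/free in place of ptr_ring and the per-CPU worker allocation (all names here are stand-ins, not the WireGuard API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature of struct crypt_queue. */
struct crypt_queue {
    void **ring;
    size_t len;
    void *worker;
};

static int packet_queue_init(struct crypt_queue *q, size_t len)
{
    memset(q, 0, sizeof(*q));
    q->ring = calloc(len, sizeof(*q->ring));   /* stands in for ptr_ring_init() */
    if (!q->ring)
        return -1;
    q->len = len;
    q->worker = malloc(64);                    /* stands in for the per-CPU worker allocation */
    if (!q->worker) {
        free(q->ring);                         /* unwind the earlier allocation on failure */
        q->ring = NULL;
        return -1;
    }
    return 0;
}

static void packet_queue_free(struct crypt_queue *q)
{
    free(q->worker);                           /* reverse order of allocation */
    free(q->ring);
}

int main(void)
{
    struct crypt_queue q;

    if (packet_queue_init(&q, 1024) == 0) {
        puts("queue ready");
        packet_queue_free(&q);
    }
    return 0;
}
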
/linux/drivers/nvme/target/
rdma.c
52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
147 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
157 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
[all …]
/linux/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
40 * WQE - Work Queue Entry
41 * SWQE - Send Work Queue Entry
42 * RWQE - Receive Work Queue Entry
43 * CQE - Completion Queue Entry
44 * EQE - Event Queue Entry
196 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument
200 if (q_offset >= queue->queue_length) in hw_qeit_calc()
201 q_offset -= queue->queue_length; in hw_qeit_calc()
202 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc()
206 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument
[all …]
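
hw_qeit_calc() above turns a byte offset into a pointer inside a hardware queue assembled from separately allocated pages: the offset is first wrapped against the queue length, the high bits select the backing page, and the low bits index into that page. A userspace sketch with a made-up page size and simplified types, not the ehea structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Hypothetical miniature of struct hw_queue: a ring built from page-sized buffers. */
struct hw_queue {
    uint8_t **queue_pages;   /* array of page-sized buffers */
    uint64_t queue_length;   /* total length in bytes       */
};

static void *qeit_calc(struct hw_queue *q, uint64_t q_offset)
{
    if (q_offset >= q->queue_length)
        q_offset -= q->queue_length;               /* wrap around the ring        */
    return q->queue_pages[q_offset >> PAGE_SHIFT]  /* pick the backing page       */
           + (q_offset & (PAGE_SIZE - 1));         /* byte offset inside the page */
}

int main(void)
{
    uint8_t *pages[2] = { malloc(PAGE_SIZE), malloc(PAGE_SIZE) };
    struct hw_queue q = { .queue_pages = pages, .queue_length = 2 * PAGE_SIZE };

    printf("offset 0x1008 lands in page 1 at byte 8? %d\n",
           qeit_calc(&q, 0x1008) == pages[1] + 8);
    free(pages[0]);
    free(pages[1]);
    return 0;
}
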
/linux/net/sunrpc/
sched.c
91 * queue->lock and bh_disabled in order to avoid races within
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
101 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
102 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer()
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
109 queue->timer_list.expires = expires; in rpc_set_queue_timer()
114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer()
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) in __rpc_add_timer()
126 rpc_set_queue_timer(queue, timeout); in __rpc_add_timer()
[all …]
/linux/drivers/net/wireless/ath/ath5k/
qcu.c
20 Queue Control Unit, DCF Control Unit Functions
31 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
35 * basically we have 10 queues to play with. Each queue has a matching
36 * QCU that controls when the queue will get triggered and multiple QCUs
39 * and DCUs allowing us to have different DFS settings for each queue.
41 * When a frame goes into a TX queue, QCU decides when it'll trigger a
43 * it's buffer or -if it's a beacon queue- if it's time to fire up the queue
58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
60 * @queue: One of enum ath5k_tx_queue_id
63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument
[all …]
dma.c
27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
116 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
118 * @queue: The hw queue number
120 * Start DMA transmit for a specific queue and since 5210 doesn't have
121 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
122 * queue for normal data and one queue for beacons). For queue setup
123 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
124 * of range or if queue is already disabled.
127 * queue (see below).
130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_start_tx_dma() argument
[all …]
/linux/drivers/gpu/drm/msm/
msm_submitqueue.c
74 struct msm_gpu_submitqueue *queue = container_of(kref, in msm_submitqueue_destroy() local
77 idr_destroy(&queue->fence_idr); in msm_submitqueue_destroy()
79 if (queue->entity == &queue->_vm_bind_entity[0]) in msm_submitqueue_destroy()
80 drm_sched_entity_destroy(queue->entity); in msm_submitqueue_destroy()
82 msm_context_put(queue->ctx); in msm_submitqueue_destroy()
84 kfree(queue); in msm_submitqueue_destroy()
112 struct msm_gpu_submitqueue *queue, *tmp; in msm_submitqueue_close() local
121 list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) { in msm_submitqueue_close()
122 if (queue->entity == &queue->_vm_bind_entity[0]) in msm_submitqueue_close()
123 drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); in msm_submitqueue_close()
[all …]
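
msm_submitqueue_destroy() above is a standard kref release callback: the refcount is embedded in the queue object, container_of() recovers the queue from the kref pointer, and the queue is torn down only when the last reference is dropped. A self-contained sketch of that embedded-refcount pattern; kref_put and container_of are re-implemented here purely for illustration, without the kernel's atomics:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical, non-atomic stand-in for struct kref. */
struct kref { unsigned int count; };

struct submitqueue {
    int id;
    struct kref ref;
};

/* Release callback: recover the outer object from its embedded refcount. */
static void submitqueue_destroy(struct kref *kref)
{
    struct submitqueue *q = container_of(kref, struct submitqueue, ref);

    printf("destroying queue %d\n", q->id);
    free(q);
}

static void kref_put(struct kref *kref, void (*release)(struct kref *))
{
    if (--kref->count == 0)
        release(kref);
}

int main(void)
{
    struct submitqueue *q = malloc(sizeof(*q));

    q->id = 3;
    q->ref.count = 2;
    kref_put(&q->ref, submitqueue_destroy); /* still referenced        */
    kref_put(&q->ref, submitqueue_destroy); /* last reference: destroy */
    return 0;
}
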
/linux/Documentation/devicetree/bindings/net/
intel,ixp4xx-hss.yaml
15 Processing Engine) and the IXP4xx Queue Manager to process
35 intel,queue-chl-rxtrig:
39 - description: phandle to the RX trigger queue on the NPE
40 - description: the queue instance number
41 description: phandle to the RX trigger queue on the NPE
43 intel,queue-chl-txready:
47 - description: phandle to the TX ready queue on the NPE
48 - description: the queue instance number
49 description: phandle to the TX ready queue on the NPE
51 intel,queue-pkt-rx:
[all …]
