| /linux/drivers/net/wireless/st/cw1200/ |
| H A D | queue.c | 3 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers 12 #include "queue.h" 27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument 29 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock() 30 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock() 31 pr_debug("[TX] Queue %d is locked.\n", in __cw1200_queue_lock() 32 queue->queue_id); in __cw1200_queue_lock() 33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock() 37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument 39 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock() [all …]
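The cw1200 match above shows a nested lock count: the mac80211 queue is stopped only on the 0-to-1 transition of tx_locked_cnt, and (by symmetry) woken again only when the last holder unlocks. A minimal userspace sketch of that pattern, with hypothetical names standing in for ieee80211_stop_queue()/ieee80211_wake_queue():

```c
/* Nested lock-count pattern suggested by __cw1200_queue_lock(): stop the
 * queue on the first lock, restart it only when the last lock is released.
 * Names are illustrative, not the driver's API. */
#include <stdio.h>

struct txq {
	int tx_locked_cnt;
	int stopped;            /* stands in for ieee80211_stop/wake_queue() */
};

static void txq_lock(struct txq *q)
{
	if (q->tx_locked_cnt++ == 0) {
		q->stopped = 1;
		printf("queue stopped\n");
	}
}

static void txq_unlock(struct txq *q)
{
	if (--q->tx_locked_cnt == 0) {
		q->stopped = 0;
		printf("queue woken\n");
	}
}

int main(void)
{
	struct txq q = { 0 };

	txq_lock(&q);   /* stops the queue */
	txq_lock(&q);   /* nested lock: no further action */
	txq_unlock(&q); /* still held once */
	txq_unlock(&q); /* last unlock wakes the queue */
	return 0;
}
```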
|
| /linux/drivers/usb/gadget/function/ |
| H A D | uvc_queue.c | 27 * Video buffers queue management. 33 * the videobuf2 queue operations by serializing calls to videobuf2 and a 34 * spinlock to protect the IRQ queue that holds the buffers to be processed by 39 * videobuf2 queue operations 46 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local 47 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup() 63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local 64 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_buffer_prepare() 74 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare() 78 if (queue->use_sg) { in uvc_buffer_prepare() [all …]
|
| /linux/drivers/nvme/target/ |
| H A D | tcp.c | 65 * queue before determining it to be idle. This optional module behavior 109 struct nvmet_tcp_queue *queue; member 218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument 221 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag() 226 return cmd - queue->cmds; in nvmet_tcp_cmd_tag() 254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument 258 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd() 274 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd() 277 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd() 280 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument 285 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue * queue) nvmet_tcp_hdgst_len() argument 290 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue * queue) nvmet_tcp_ddgst_len() argument 300 nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue * queue,void * pdu,size_t len) nvmet_tcp_verify_hdgst() argument 326 nvmet_tcp_check_ddgst(struct nvmet_tcp_queue * queue,void * pdu) nvmet_tcp_check_ddgst() argument 399 nvmet_tcp_fatal_error(struct nvmet_tcp_queue * queue) nvmet_tcp_fatal_error() argument 408 nvmet_tcp_socket_error(struct nvmet_tcp_queue * queue,int status) nvmet_tcp_socket_error() argument 476 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_setup_c2h_data_pdu() local 549 nvmet_tcp_process_resp_list(struct nvmet_tcp_queue * queue) nvmet_tcp_process_resp_list() argument 561 nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue * queue) nvmet_tcp_fetch_cmd() argument 591 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_queue_response() local 657 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_try_send_data() local 769 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_try_send_ddgst() local 802 nvmet_tcp_try_send_one(struct nvmet_tcp_queue * queue,bool last_in_batch) nvmet_tcp_try_send_one() argument 851 nvmet_tcp_try_send(struct nvmet_tcp_queue * queue,int budget,int * sends) nvmet_tcp_try_send() argument 870 nvmet_prepare_receive_pdu(struct nvmet_tcp_queue * queue) nvmet_prepare_receive_pdu() argument 879 nvmet_tcp_handle_icreq(struct nvmet_tcp_queue * queue) nvmet_tcp_handle_icreq() argument 934 nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue * queue,struct nvmet_tcp_cmd * cmd,struct nvmet_req * req) nvmet_tcp_handle_req_failure() argument 965 nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_handle_h2c_data_pdu() argument 1026 nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_done_recv_pdu() argument 1137 nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue * queue,struct msghdr * msg,char * cbuf) nvmet_tcp_tls_record_ok() argument 1172 nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_pdu() argument 1237 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_prep_recv_ddgst() local 1245 nvmet_tcp_try_recv_data(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_data() argument 1278 nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_ddgst() argument 1329 nvmet_tcp_try_recv_one(struct nvmet_tcp_queue * queue) nvmet_tcp_try_recv_one() argument 1363 nvmet_tcp_try_recv(struct nvmet_tcp_queue * queue,int budget,int * recvs) nvmet_tcp_try_recv() argument 1384 struct nvmet_tcp_queue *queue = nvmet_tcp_release_queue() local 1391 nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue * queue) nvmet_tcp_schedule_release_queue() argument 1405 nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue * 
queue) nvmet_tcp_arm_queue_deadline() argument 1410 nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue * queue,int ops) nvmet_tcp_check_queue_deadline() argument 1424 struct nvmet_tcp_queue *queue = nvmet_tcp_io_work() local 1454 nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue * queue,struct nvmet_tcp_cmd * c) nvmet_tcp_alloc_cmd() argument 1510 nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_alloc_cmds() argument 1536 nvmet_tcp_free_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_free_cmds() argument 1548 nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue * queue) nvmet_tcp_restore_socket_callbacks() argument 1563 nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue * queue) nvmet_tcp_uninit_data_in_cmds() argument 1579 nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue * queue) nvmet_tcp_free_cmd_data_in_buffers() argument 1591 struct nvmet_tcp_queue *queue = nvmet_tcp_release_queue_work() local 1620 struct nvmet_tcp_queue *queue; nvmet_tcp_data_ready() local 1638 struct nvmet_tcp_queue *queue; nvmet_tcp_write_space() local 1660 struct nvmet_tcp_queue *queue; nvmet_tcp_state_change() local 1685 nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue * queue) nvmet_tcp_set_queue_sock() argument 1741 nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue * queue) nvmet_tcp_try_peek_pdu() argument 1789 nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue * queue,key_serial_t peerid) nvmet_tcp_tls_key_lookup() argument 1813 struct nvmet_tcp_queue *queue = data; nvmet_tcp_tls_handshake_done() local 1843 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), nvmet_tcp_tls_handshake_timeout() local 1863 nvmet_tcp_tls_handshake(struct nvmet_tcp_queue * queue) nvmet_tcp_tls_handshake() argument 1899 struct nvmet_tcp_queue *queue; nvmet_tcp_alloc_queue() local 2113 struct nvmet_tcp_queue *queue; nvmet_tcp_destroy_port_queues() local 2143 struct nvmet_tcp_queue *queue; nvmet_tcp_delete_ctrl() local 2154 struct nvmet_tcp_queue *queue = nvmet_tcp_install_queue() local 2189 struct nvmet_tcp_queue *queue = cmd->queue; nvmet_tcp_disc_port_addr() local 2201 struct nvmet_tcp_queue *queue = nvmet_tcp_host_port_addr() local 2244 struct nvmet_tcp_queue *queue; nvmet_tcp_exit() local [all...] |
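The nvmet_tcp_get_cmd()/nvmet_tcp_put_cmd() matches above take pre-allocated commands off a per-queue free list and return them on completion, with the command's tag being its index in the backing array. A rough standalone illustration of that pre-allocated pool pattern (hypothetical types, no locking shown):

```c
/* Pre-allocated command pool managed as a free list, in the spirit of
 * nvmet_tcp_get_cmd()/nvmet_tcp_put_cmd(). Names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int tag;                /* index into the backing array */
	struct cmd *next;       /* free-list linkage */
};

struct cmd_pool {
	struct cmd *cmds;       /* backing array */
	struct cmd *free_list;
	int nr_cmds;
};

static int pool_init(struct cmd_pool *p, int nr)
{
	p->cmds = calloc(nr, sizeof(*p->cmds));
	if (!p->cmds)
		return -1;
	p->nr_cmds = nr;
	p->free_list = NULL;
	for (int i = nr - 1; i >= 0; i--) {
		p->cmds[i].tag = i;
		p->cmds[i].next = p->free_list;
		p->free_list = &p->cmds[i];
	}
	return 0;
}

static struct cmd *pool_get(struct cmd_pool *p)
{
	struct cmd *c = p->free_list;

	if (c)
		p->free_list = c->next;
	return c;               /* NULL means the pool is exhausted */
}

static void pool_put(struct cmd_pool *p, struct cmd *c)
{
	c->next = p->free_list;
	p->free_list = c;
}

int main(void)
{
	struct cmd_pool pool;

	if (pool_init(&pool, 4))
		return 1;
	struct cmd *c = pool_get(&pool);
	printf("got tag %d\n", c->tag);
	pool_put(&pool, c);
	free(pool.cmds);
	return 0;
}
```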
| H A D | rdma.c | 52 struct nvmet_rdma_queue *queue; member 66 struct nvmet_rdma_queue *queue; member 147 MODULE_PARM_DESC(use_srq, "Use shared receive queue."); 157 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)"); 172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); 213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument 218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp() 220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp() 228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp() 243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp() [all …]
|
| /linux/drivers/net/wireless/broadcom/b43legacy/ |
| H A D | pio.c | 22 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument 24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start() 28 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument 31 if (queue->need_workarounds) { in tx_octet() 32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet() 33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet() 36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet() 38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet() 63 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument 71 if (queue->need_workarounds) { in tx_data() [all …]
|
| /linux/drivers/md/dm-vdo/ |
| H A D | funnel-workqueue.c | 15 #include "funnel-queue.h" 28 * DOC: Work queue definition. 36 /* Name of just the work queue (e.g., "cpuQ12") */ 73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) in as_simple_work_queue() argument 75 return ((queue == NULL) ? in as_simple_work_queue() 76 NULL : container_of(queue, struct simple_work_queue, common)); in as_simple_work_queue() 79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) in as_round_robin_work_queue() argument 81 return ((queue == NULL) ? in as_round_robin_work_queue() 83 container_of(queue, struct round_robin_work_queue, common)); in as_round_robin_work_queue() 96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) in poll_for_completion() argument 110 enqueue_work_queue_completion(struct simple_work_queue * queue,struct vdo_completion * completion) enqueue_work_queue_completion() argument 153 run_start_hook(struct simple_work_queue * queue) run_start_hook() argument 159 run_finish_hook(struct simple_work_queue * queue) run_finish_hook() argument 174 wait_for_next_completion(struct simple_work_queue * queue) wait_for_next_completion() argument 222 process_completion(struct simple_work_queue * queue,struct vdo_completion * completion) process_completion() argument 233 service_work_queue(struct simple_work_queue * queue) service_work_queue() argument 263 struct simple_work_queue *queue = ptr; work_queue_runner() local 272 free_simple_work_queue(struct simple_work_queue * queue) free_simple_work_queue() argument 282 free_round_robin_work_queue(struct round_robin_work_queue * queue) free_round_robin_work_queue() argument 297 vdo_free_work_queue(struct vdo_work_queue * queue) vdo_free_work_queue() argument 316 struct simple_work_queue *queue; make_simple_work_queue() local 392 struct round_robin_work_queue *queue; vdo_make_work_queue() local 450 finish_simple_work_queue(struct simple_work_queue * queue) finish_simple_work_queue() argument 460 finish_round_robin_work_queue(struct round_robin_work_queue * queue) finish_round_robin_work_queue() argument 471 vdo_finish_work_queue(struct vdo_work_queue * queue) vdo_finish_work_queue() argument 484 dump_simple_work_queue(struct simple_work_queue * queue) dump_simple_work_queue() argument 505 vdo_dump_work_queue(struct vdo_work_queue * queue) vdo_dump_work_queue() argument 562 vdo_enqueue_work_queue(struct vdo_work_queue * queue,struct vdo_completion * completion) vdo_enqueue_work_queue() argument 618 struct simple_work_queue *queue = get_current_thread_work_queue(); vdo_get_current_work_queue() local 623 vdo_get_work_queue_owner(struct vdo_work_queue * queue) vdo_get_work_queue_owner() argument 635 struct simple_work_queue *queue = get_current_thread_work_queue(); vdo_get_work_queue_private_data() local 640 vdo_work_queue_type_is(struct vdo_work_queue * queue,const struct vdo_work_queue_type * type) vdo_work_queue_type_is() argument [all...] |
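The as_simple_work_queue()/as_round_robin_work_queue() matches above are NULL-tolerant downcasts: a common struct vdo_work_queue is embedded in each concrete queue type and container_of() recovers the outer object. A tiny userspace sketch of that idiom (types are made up, container_of() written out via offsetof):

```c
/* NULL-tolerant downcast from an embedded "common" struct, as in
 * as_simple_work_queue(). Hypothetical types. */
#include <stddef.h>
#include <stdio.h>

struct vdo_work_queue { const char *name; };        /* common part */

struct simple_work_queue {
	struct vdo_work_queue common;               /* embedded base */
	int thread_id;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct simple_work_queue *as_simple(struct vdo_work_queue *q)
{
	return q ? container_of(q, struct simple_work_queue, common) : NULL;
}

int main(void)
{
	struct simple_work_queue sq = { .common = { "cpuQ12" }, .thread_id = 3 };

	printf("%d\n", as_simple(&sq.common)->thread_id); /* 3 */
	printf("%p\n", (void *)as_simple(NULL));          /* NULL passes through */
	return 0;
}
```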
| H A D | funnel-queue.c | 6 #include "funnel-queue.h" 15 struct funnel_queue *queue; in vdo_make_funnel_queue() local 17 result = vdo_allocate(1, "funnel queue", &queue); in vdo_make_funnel_queue() 22 * Initialize the stub entry and put it in the queue, establishing the invariant that in vdo_make_funnel_queue() 23 * queue->newest and queue->oldest are never null. in vdo_make_funnel_queue() 25 queue->stub.next = NULL; in vdo_make_funnel_queue() 26 queue->newest = &queue in vdo_make_funnel_queue() 33 vdo_free_funnel_queue(struct funnel_queue * queue) vdo_free_funnel_queue() argument 38 get_oldest(struct funnel_queue * queue) get_oldest() argument 103 vdo_funnel_queue_poll(struct funnel_queue * queue) vdo_funnel_queue_poll() argument 137 vdo_is_funnel_queue_empty(struct funnel_queue * queue) vdo_is_funnel_queue_empty() argument 148 vdo_is_funnel_queue_idle(struct funnel_queue * queue) vdo_is_funnel_queue_idle() argument [all...] |
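The funnel-queue.c matches above describe the key invariant: a permanent stub entry is queued at creation so queue->newest and queue->oldest are never NULL, producers publish by swapping the newest pointer, and a single consumer walks from oldest. Below is a simplified C11 sketch of that multi-producer/single-consumer shape; it is an illustration, not the VDO code, and its poll() simply reports "empty, try again" during the transient window a producer is mid-publish:

```c
/* Stub-node MPSC queue sketch in the spirit of funnel-queue.c. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fq_entry {
	_Atomic(struct fq_entry *) next;
};

struct funnel_queue {
	_Atomic(struct fq_entry *) newest;
	struct fq_entry *oldest;
	struct fq_entry stub;        /* keeps newest/oldest non-NULL */
};

static void fq_init(struct funnel_queue *q)
{
	atomic_store(&q->stub.next, NULL);
	atomic_store(&q->newest, &q->stub);
	q->oldest = &q->stub;
}

static void fq_put(struct funnel_queue *q, struct fq_entry *e)
{
	atomic_store(&e->next, NULL);
	/* Swap in the new tail, then link the previous tail to it. */
	struct fq_entry *prev = atomic_exchange(&q->newest, e);
	atomic_store(&prev->next, e);
}

static struct fq_entry *fq_poll(struct funnel_queue *q)
{
	struct fq_entry *oldest = q->oldest;

	if (oldest == &q->stub) {            /* skip over the stub */
		struct fq_entry *next = atomic_load(&oldest->next);
		if (!next)
			return NULL;
		q->oldest = oldest = next;
	}
	struct fq_entry *next = atomic_load(&oldest->next);
	if (next) {
		q->oldest = next;
		return oldest;
	}
	if (oldest != atomic_load(&q->newest))
		return NULL;                 /* producer mid-publish; retry later */
	fq_put(q, &q->stub);                 /* re-append stub so oldest can move */
	next = atomic_load(&oldest->next);
	if (!next)
		return NULL;
	q->oldest = next;
	return oldest;
}

int main(void)
{
	struct funnel_queue q;
	struct fq_entry a, b;

	fq_init(&q);
	fq_put(&q, &a);
	fq_put(&q, &b);
	printf("%s\n", fq_poll(&q) == &a ? "a" : "?");
	printf("%s\n", fq_poll(&q) == &b ? "b" : "?");
	printf("%s\n", fq_poll(&q) ? "?" : "empty");
	return 0;
}
```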
| /linux/drivers/nvme/host/ |
| H A D | tcp.c | 107 struct nvme_tcp_queue *queue; member 208 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue); 215 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument 217 return queue - queue->ctrl->queues; in nvme_tcp_queue_id() 234 * Check if the queue is TLS encrypted 236 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument 241 return queue->tls_enabled; in nvme_tcp_queue_tls() 255 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument 257 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset() 260 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset() [all …]
|
| /linux/drivers/scsi/arm/ |
| H A D | queue.c | 3 * linux/drivers/acorn/scsi/queue.c: queue handling primitives 50 #include "queue.h" 55 * Function: void queue_initialise (Queue_t *queue) 56 * Purpose : initialise a queue 57 * Params : queue - queue to initialise 59 int queue_initialise (Queue_t *queue) in queue_initialise() argument 64 spin_lock_init(&queue->queue_lock); in queue_initialise() 65 INIT_LIST_HEAD(&queue->head); in queue_initialise() 66 INIT_LIST_HEAD(&queue->free); in queue_initialise() 74 queue->alloc = q = kmalloc_objs(QE_t, nqueues); in queue_initialise() [all …]
|
| H A D | queue.h | 3 * linux/drivers/acorn/scsi/queue.h: queue handling 18 * Function: void queue_initialise (Queue_t *queue) 19 * Purpose : initialise a queue 20 * Params : queue - queue to initialise 22 extern int queue_initialise (Queue_t *queue); 25 * Function: void queue_free (Queue_t *queue) 26 * Purpose : free a queue 27 * Params : queue - queue to free 29 extern void queue_free (Queue_t *queue); 32 * Function: struct scsi_cmnd *queue_remove (queue) [all …]
|
| /linux/drivers/net/xen-netback/ |
| H A D | rx.c | 42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, in xenvif_update_needed_slots() argument 55 WRITE_ONCE(queue->rx_slots_needed, needed); in xenvif_update_needed_slots() 58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument 63 needed = READ_ONCE(queue->rx_slots_needed); in xenvif_rx_ring_slots_available() 68 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available() 69 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available() 74 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available() 80 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available() 85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument 90 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail() [all …]
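The xenvif_rx_ring_slots_available() match above relies on free-running producer/consumer counters: because req_prod and req_cons only ever increase, "prod - cons" gives the number of outstanding requests even after the 32-bit counters wrap. A small sketch of that arithmetic (ring size and values are made up):

```c
/* Free-running ring counters: prod - cons is wrap-safe in unsigned math. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u                     /* must be a power of two */

struct ring {
	uint32_t req_prod;                 /* advanced by the frontend */
	uint32_t req_cons;                 /* advanced by the backend */
};

static bool slots_available(const struct ring *r, uint32_t needed)
{
	return r->req_prod - r->req_cons >= needed;
}

static uint32_t slot_index(uint32_t counter)
{
	return counter & (RING_SIZE - 1);  /* map the counter onto the ring */
}

int main(void)
{
	/* prod has wrapped past UINT32_MAX while cons has not: still 4 apart. */
	struct ring r = { .req_prod = 2u, .req_cons = 0xfffffffeu };

	printf("outstanding: %u\n", r.req_prod - r.req_cons);       /* 4 */
	printf("room for 4? %s\n", slots_available(&r, 4) ? "yes" : "no");
	printf("next slot index: %u\n", slot_index(r.req_cons));    /* 254 */
	return 0;
}
```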
|
| H A D | interface.c | 44 /* Number of bytes allowed on the internal guest Rx queue. */ 52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument 56 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare() 59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument 61 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete() 67 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete() 77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument 81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt() 83 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt() 89 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local [all …]
|
| H A D | netback.c | 59 /* The time that packets can stay on the guest Rx internal queue 107 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, 110 static void make_tx_response(struct xenvif_queue *queue, 115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); 117 static inline int tx_work_todo(struct xenvif_queue *queue); 119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument 122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn() 125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument 128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr() 161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument [all …]
|
| /linux/drivers/gpu/drm/imagination/ |
| H A D | pvr_queue.c | 86 WARN(1, "Invalid queue type"); in get_ctx_state_size() 116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work() 123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release() 133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name() 147 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name() 156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name() 170 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name() 262 * @queue: The queue this fence belongs to. 267 * pvr_queue_fence::queue field too. 271 struct pvr_queue *queue, in pvr_queue_fence_init() argument [all …]
|
| /linux/fs/fuse/ |
| H A D | dev_uring.c | 51 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument 53 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg() 56 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg() 60 * Allow one bg request per queue, ignoring global fc limits. in fuse_uring_flush_bg() 61 * This prevents a single queue from consuming all resources and in fuse_uring_flush_bg() 62 * eliminates the need for remote queue wake-ups when global in fuse_uring_flush_bg() 63 * limits are met but this queue has no more waiting requests. in fuse_uring_flush_bg() 66 !queue->active_background) && in fuse_uring_flush_bg() 67 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg() 70 req = list_first_entry(&queue in fuse_uring_flush_bg() 82 struct fuse_ring_queue *queue = ent->queue; fuse_uring_req_end() local 107 fuse_uring_abort_end_queue_requests(struct fuse_ring_queue * queue) fuse_uring_abort_end_queue_requests() argument 125 struct fuse_ring_queue *queue; fuse_uring_abort_end_requests() local 163 struct fuse_ring_queue *queue; fuse_uring_request_expired() local 197 struct fuse_ring_queue *queue = ring->queues[qid]; fuse_uring_destruct() local 274 struct fuse_ring_queue *queue; fuse_uring_create_queue() local 333 struct fuse_ring_queue *queue = ent->queue; fuse_uring_entry_teardown() local 363 fuse_uring_stop_list_entries(struct list_head * head,struct fuse_ring_queue * queue,enum fuse_ring_req_state exp_state) fuse_uring_stop_list_entries() argument 392 fuse_uring_teardown_entries(struct fuse_ring_queue * queue) fuse_uring_teardown_entries() argument 409 struct fuse_ring_queue *queue = ring->queues[qid]; fuse_uring_log_ent_state() local 440 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); fuse_uring_async_stop_queues() local 475 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); fuse_uring_stop_queues() local 504 struct fuse_ring_queue *queue; fuse_uring_cancel() local 670 struct fuse_ring_queue *queue = ent->queue; fuse_uring_copy_to_ring() local 726 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send_next_to_ring() local 749 fuse_uring_ent_avail(struct fuse_ring_ent * ent,struct fuse_ring_queue * queue) fuse_uring_ent_avail() argument 760 struct fuse_ring_queue *queue = ent->queue; fuse_uring_add_to_pq() local 775 struct fuse_ring_queue *queue = ent->queue; fuse_uring_add_req_to_ring_ent() local 797 struct fuse_ring_queue *queue = ent->queue; fuse_uring_ent_assign_req() local 844 fuse_uring_next_fuse_req(struct fuse_ring_ent * ent,struct fuse_ring_queue * queue,unsigned int issue_flags) fuse_uring_next_fuse_req() argument 865 struct fuse_ring_queue *queue = ent->queue; fuse_ring_ent_set_commit() local 887 struct fuse_ring_queue *queue; fuse_uring_commit_fetch() local 957 struct fuse_ring_queue *queue; is_ring_ready() local 986 struct fuse_ring_queue *queue = ent->queue; fuse_uring_do_register() local 1037 fuse_uring_create_ring_ent(struct io_uring_cmd * cmd,struct fuse_ring_queue * queue) fuse_uring_create_ring_ent() argument 1090 struct fuse_ring_queue *queue; fuse_uring_register() local 1202 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send() local 1223 struct fuse_ring_queue *queue = ent->queue; fuse_uring_send_in_task() local 1242 struct fuse_ring_queue *queue; fuse_uring_task_to_queue() local 1270 struct fuse_ring_queue *queue; fuse_uring_queue_fuse_req() local 1313 struct fuse_ring_queue *queue; fuse_uring_queue_bq_req() local 1360 struct fuse_ring_queue *queue = req->ring_queue; 
fuse_uring_remove_pending_req() local [all...] |
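The fuse_uring_flush_bg() comment matched above limits background requests per ring queue so one queue cannot consume all resources; pending background requests are promoted only while the queue is under its own budget, and completions refill that budget. A rough sketch of the idea (the kernel comment mentions one request per queue; the budget here is an arbitrary illustrative value):

```c
/* Per-queue background budget: promote pending requests only while under
 * the limit; a completion frees budget and triggers another flush. */
#include <stdio.h>

#define BG_LIMIT 2                       /* hypothetical per-queue budget */

struct req { int id; struct req *next; };

struct ring_queue {
	struct req *bg_pending;          /* waiting background requests */
	int active_background;           /* currently in flight */
};

static void flush_bg(struct ring_queue *q)
{
	while (q->active_background < BG_LIMIT && q->bg_pending) {
		struct req *req = q->bg_pending;

		q->bg_pending = req->next;
		q->active_background++;
		printf("dispatch req %d (active=%d)\n", req->id,
		       q->active_background);
	}
}

static void complete_bg(struct ring_queue *q)
{
	q->active_background--;
	flush_bg(q);                     /* completion frees budget: refill */
}

int main(void)
{
	struct req r3 = { 3, NULL }, r2 = { 2, &r3 }, r1 = { 1, &r2 };
	struct ring_queue q = { .bg_pending = &r1 };

	flush_bg(&q);                    /* dispatches 1 and 2 */
	complete_bg(&q);                 /* 1 completes, 3 is dispatched */
	return 0;
}
```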
| /linux/drivers/net/ |
| H A D | xen-netfront.c | 91 /* Queue name is interface name with "-qNNN" appended */ 94 /* IRQ name is queue name with "-tx" or "-rx" appended */ 108 unsigned int id; /* Queue ID, 0-based */ 166 /* Multi-queue support */ 218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument 222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb() 223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb() 227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument 231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref() 232 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_get_rx_ref() [all …]
|
| /linux/drivers/iio/buffer/ |
| H A D | industrialio-buffer-dma.c | 38 * means of two queues. The incoming queue and the outgoing queue. Blocks on the 39 * incoming queue are waiting for the DMA controller to pick them up and fill 40 * them with data. Block on the outgoing queue have been filled with data and 56 * incoming or outgoing queue the block will be freed. 102 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local 107 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release() 110 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release() 113 iio_buffer_put(&queue->buffer); in iio_buffer_block_release() 173 iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue *queue, size_t size, in iio_dma_buffer_alloc_block() argument 182 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block() [all …]
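The IIO DMA buffer comment matched above describes blocks circulating between two queues: empty blocks wait on the incoming queue for the DMA controller to fill them, filled blocks sit on the outgoing queue until consumed, and consumed blocks are requeued. A toy model of that lifecycle (hypothetical structures, no locking or real DMA, and FIFO ordering is ignored):

```c
/* Two-queue block lifecycle: incoming (to be filled) -> outgoing (filled)
 * -> back to incoming once the consumer has drained the block. */
#include <stdio.h>

enum block_state { BLOCK_INCOMING, BLOCK_OUTGOING };

struct block {
	int id;
	enum block_state state;
	struct block *next;
};

struct dma_queue {
	struct block *incoming;          /* waiting to be filled */
	struct block *outgoing;          /* filled, waiting for the consumer */
};

static void push(struct block **list, struct block *b) { b->next = *list; *list = b; }

static struct block *pop(struct block **list)
{
	struct block *b = *list;

	if (b)
		*list = b->next;
	return b;
}

/* "Hardware" completes a transfer: incoming -> outgoing. */
static void dma_complete(struct dma_queue *q)
{
	struct block *b = pop(&q->incoming);

	if (!b)
		return;
	b->state = BLOCK_OUTGOING;
	push(&q->outgoing, b);
}

/* Consumer reads the data, then hands the block back for refilling. */
static void consume_and_requeue(struct dma_queue *q)
{
	struct block *b = pop(&q->outgoing);

	if (!b)
		return;
	printf("read block %d\n", b->id);
	b->state = BLOCK_INCOMING;
	push(&q->incoming, b);
}

int main(void)
{
	struct block b0 = { 0, BLOCK_INCOMING, NULL };
	struct dma_queue q = { .incoming = &b0 };

	dma_complete(&q);
	consume_and_requeue(&q);
	return 0;
}
```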
|
| /linux/drivers/md/dm-vdo/indexer/ |
| H A D | funnel-requestqueue.c | 12 #include "funnel-queue.h" 18 * This queue will attempt to handle requests in reasonably sized batches instead of reacting 22 * If the wait time becomes long enough, the queue will become dormant and must be explicitly 24 * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a 28 * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before 31 * queue's "next" field update isn't visible yet to make the entry accessible, its existence will 35 * the queue to awaken immediately. 50 /* Wait queue for synchronizing producers and consumer */ 54 /* Queue of new incoming requests */ 56 /* Queue o 68 poll_queues(struct uds_request_queue * queue) poll_queues() argument 83 are_queues_idle(struct uds_request_queue * queue) are_queues_idle() argument 94 dequeue_request(struct uds_request_queue * queue,struct uds_request ** request_ptr,bool * waited_ptr) dequeue_request() argument 115 wait_for_request(struct uds_request_queue * queue,bool dormant,unsigned long timeout,struct uds_request ** request,bool * waited) wait_for_request() argument 133 struct uds_request_queue *queue = arg; request_queue_worker() local 199 struct uds_request_queue *queue; uds_make_request_queue() local 234 wake_up_worker(struct uds_request_queue * queue) wake_up_worker() argument 240 uds_request_queue_enqueue(struct uds_request_queue * queue,struct uds_request * request) uds_request_queue_enqueue() argument 257 uds_request_queue_finish(struct uds_request_queue * queue) uds_request_queue_finish() argument [all...] |
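The funnel-requestqueue.c comment matched above describes a worker that handles requests in batches and, after being idle long enough, marks itself dormant so producers only take the expensive wake-up path when needed. The pthread sketch below captures that shape with an ordinary mutex/condvar instead of the lock-free funnel queue, so it is an analogy to the idea rather than the UDS algorithm:

```c
/* Dormant-consumer sketch: the worker drains everything queued in one batch,
 * sleeps only after flagging itself dormant, and producers signal only when
 * that flag is set. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct request { int id; struct request *next; };

static struct request *queue_head;
static bool dormant;
static bool shutting_down;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

static void enqueue(struct request *req)
{
	pthread_mutex_lock(&lock);
	req->next = queue_head;
	queue_head = req;
	if (dormant)                       /* only wake a sleeping worker */
		pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!queue_head && !shutting_down) {
			dormant = true;    /* idle: require an explicit wakeup */
			pthread_cond_wait(&wake, &lock);
			dormant = false;
		}
		if (!queue_head && shutting_down)
			break;
		/* Batch: take everything currently queued in one go. */
		struct request *batch = queue_head;
		queue_head = NULL;
		pthread_mutex_unlock(&lock);
		for (struct request *r = batch; r; r = r->next)
			printf("handled request %d\n", r->id);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct request r1 = { 1, NULL }, r2 = { 2, NULL };

	pthread_create(&tid, NULL, worker, NULL);
	enqueue(&r1);
	enqueue(&r2);
	pthread_mutex_lock(&lock);
	shutting_down = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}
```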
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_userq.c | 125 * Iterate through all queue types to detect and reset problematic queues in amdgpu_userq_detect_and_reset_queues() 126 * Process each queue type in the defined order in amdgpu_userq_detect_and_reset_queues() 153 struct amdgpu_usermode_queue *queue = container_of(work, in amdgpu_userq_hang_detect_work() local 159 if (!queue->userq_mgr) in amdgpu_userq_hang_detect_work() 162 uq_mgr = queue->userq_mgr; in amdgpu_userq_hang_detect_work() 163 fence = READ_ONCE(queue->hang_detect_fence); in amdgpu_userq_hang_detect_work() 174 * Start hang detection for a user queue fence. A delayed work will be scheduled 177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue) in amdgpu_userq_start_hang_detect_work() argument 182 if (!queue || !queue in amdgpu_userq_start_hang_detect_work() 208 amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue * queue) amdgpu_userq_init_hang_detect_work() argument 214 amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue * queue,struct amdgpu_bo_va_mapping * va_map,u64 addr) amdgpu_userq_buffer_va_list_add() argument 233 amdgpu_userq_input_va_validate(struct amdgpu_device * adev,struct amdgpu_usermode_queue * queue,u64 addr,u64 expected_size) amdgpu_userq_input_va_validate() argument 286 amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue * queue) amdgpu_userq_buffer_vas_mapped() argument 313 amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device * adev,struct amdgpu_usermode_queue * queue) amdgpu_userq_buffer_vas_list_cleanup() argument 338 amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_preempt_helper() argument 363 amdgpu_userq_restore_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_restore_helper() argument 383 amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_unmap_helper() argument 409 amdgpu_userq_map_helper(struct amdgpu_usermode_queue * queue) amdgpu_userq_map_helper() argument 430 amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue * queue) amdgpu_userq_wait_for_last_fence() argument 449 amdgpu_userq_cleanup(struct amdgpu_usermode_queue * queue) amdgpu_userq_cleanup() argument 627 amdgpu_userq_destroy(struct amdgpu_userq_mgr * uq_mgr,struct amdgpu_usermode_queue * queue) amdgpu_userq_destroy() argument 676 struct amdgpu_usermode_queue *queue = amdgpu_userq_kref_destroy() local 687 struct amdgpu_usermode_queue *queue; amdgpu_userq_get() local 698 amdgpu_userq_put(struct amdgpu_usermode_queue * queue) amdgpu_userq_put() argument 722 struct amdgpu_usermode_queue *queue = m->private; amdgpu_mqd_info_read() local 766 struct amdgpu_usermode_queue *queue; amdgpu_userq_create() local 1013 struct amdgpu_usermode_queue *queue; amdgpu_userq_ioctl() local 1051 struct amdgpu_usermode_queue *queue; amdgpu_userq_restore_all() local 1300 struct amdgpu_usermode_queue *queue; amdgpu_userq_evict_all() local 1341 struct amdgpu_usermode_queue *queue; amdgpu_userq_wait_for_signal() local 1413 struct amdgpu_usermode_queue *queue; amdgpu_userq_mgr_fini() local 1437 struct amdgpu_usermode_queue *queue; amdgpu_userq_suspend() local 1463 struct amdgpu_usermode_queue *queue; amdgpu_userq_resume() local 1489 struct amdgpu_usermode_queue *queue; amdgpu_userq_stop_sched_for_enforce_isolation() local 1523 struct amdgpu_usermode_queue *queue; amdgpu_userq_start_sched_for_enforce_isolation() local 1585 struct amdgpu_usermode_queue *queue; amdgpu_userq_pre_reset() local 1612 struct amdgpu_usermode_queue *queue; amdgpu_userq_post_reset() 
local [all...] |
| /linux/drivers/misc/genwqe/ |
| H A D | card_ddcb.c | 14 * Device Driver Control Block (DDCB) queue support. Definition of 15 * interrupt handlers for queue support as well as triggering the 40 * Situation (1): Empty queue 56 * Situation (3): Queue wrapped, A > N 64 * Situation (4a): Queue full N > A 73 * Situation (4a): Queue full A > N 82 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument 84 return queue->ddcb_next == queue->ddcb_act; in queue_empty() 87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument 89 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs() [all …]
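The card_ddcb.c comment matched above enumerates the empty/wrapped/full situations for the DDCB ring, and queue_enqueued_ddcbs() derives the occupancy from the "active" and "next" indices. A tiny standalone version of that arithmetic, paraphrased from the snippet with a made-up ring size:

```c
/* DDCB-style ring occupancy from two indices, with wraparound handled. */
#include <stdio.h>

#define MAX_DDCBS 32                    /* hypothetical ring size */

struct ddcb_queue {
	int ddcb_act;                   /* oldest in-flight entry */
	int ddcb_next;                  /* next entry to hand out */
	int ddcb_max;
};

static int queue_empty(const struct ddcb_queue *q)
{
	return q->ddcb_next == q->ddcb_act;
}

static int queue_enqueued_ddcbs(const struct ddcb_queue *q)
{
	if (q->ddcb_next >= q->ddcb_act)            /* not wrapped */
		return q->ddcb_next - q->ddcb_act;
	/* wrapped: entries run from act..max-1 and then 0..next-1 */
	return q->ddcb_max - (q->ddcb_act - q->ddcb_next);
}

int main(void)
{
	struct ddcb_queue q = { .ddcb_act = 30, .ddcb_next = 3, .ddcb_max = MAX_DDCBS };

	printf("empty: %d\n", queue_empty(&q));              /* 0 */
	printf("in flight: %d\n", queue_enqueued_ddcbs(&q)); /* 5: 30,31,0,1,2 */
	return 0;
}
```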
|
| /linux/drivers/net/wireless/ralink/rt2x00/ |
| H A D | rt2x00queue.c | 12 Abstract: rt2x00 queue specific routines. 25 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local 26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb() 37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb() 95 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb() 112 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb() 488 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data() 493 * a queue corruption! in rt2x00queue_write_tx_data() 498 "Corrupt queue %d, accessing entry which is not ours\n" in rt2x00queue_write_tx_data() 500 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data() [all …]
|
| /linux/drivers/net/wireguard/ |
| H A D | queueing.c | 25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, in wg_packet_queue_init() argument 30 memset(queue, 0, sizeof(*queue)); in wg_packet_queue_init() 31 queue->last_cpu = -1; in wg_packet_queue_init() 32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); in wg_packet_queue_init() 35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); in wg_packet_queue_init() 36 if (!queue->worker) { in wg_packet_queue_init() 37 ptr_ring_cleanup(&queue->ring, NULL); in wg_packet_queue_init() 43 void wg_packet_queue_free(struct crypt_queue *queue, bool purge) in wg_packet_queue_free() argument 45 free_percpu(queue->worker); in wg_packet_queue_free() 46 WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); in wg_packet_queue_free() [all …]
|
| /linux/net/sunrpc/ |
| H A D | sched.c | 91 * queue->lock and bh_disabled in order to avoid races within 95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument 101 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer() 102 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer() 106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument 109 queue->timer_list.expires = expires; in rpc_set_queue_timer() 114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer() 121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument 125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) in __rpc_add_timer() 126 rpc_set_queue_timer(queue, timeout); in __rpc_add_timer() [all …]
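The __rpc_add_timer()/__rpc_disable_timer() matches above keep one shared timer per wait queue: the delayed work is re-armed only when a newly queued task's deadline is earlier than the current expiry, and cancelled once no timed task remains. A small sketch of that earliest-deadline bookkeeping (the kernel compares jiffies with time_before() to survive wraparound; this sketch uses a plain comparison, and all names are illustrative):

```c
/* One timer per queue, tracking only the earliest deadline. */
#include <stdbool.h>
#include <stdio.h>

struct wait_queue_timer {
	bool armed;
	unsigned long expires;          /* earliest deadline of queued tasks */
	int nr_timed_tasks;
};

static void queue_add_timer(struct wait_queue_timer *t, unsigned long timeout)
{
	/* Only move the timer if this task needs to fire sooner. */
	if (!t->armed || timeout < t->expires) {
		t->expires = timeout;
		t->armed = true;
		printf("timer armed for %lu\n", timeout);
	}
	t->nr_timed_tasks++;
}

static void queue_remove_task(struct wait_queue_timer *t)
{
	if (--t->nr_timed_tasks == 0) {
		t->armed = false;       /* nothing left to expire */
		printf("timer cancelled\n");
	}
}

int main(void)
{
	struct wait_queue_timer t = { 0 };

	queue_add_timer(&t, 500);       /* arms at 500 */
	queue_add_timer(&t, 200);       /* re-arms earlier */
	queue_add_timer(&t, 900);       /* no change */
	queue_remove_task(&t);
	queue_remove_task(&t);
	queue_remove_task(&t);          /* last one cancels the timer */
	return 0;
}
```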
|
| /linux/drivers/net/ethernet/ibm/ehea/ |
| H A D | ehea_qmr.h | 40 * WQE - Work Queue Entry 41 * SWQE - Send Work Queue Entry 42 * RWQE - Receive Work Queue Entry 43 * CQE - Completion Queue Entry 44 * EQE - Event Queue Entry 196 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument 200 if (q_offset >= queue->queue_length) in hw_qeit_calc() 201 q_offset -= queue->queue_length; in hw_qeit_calc() 202 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc() 206 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument [all …]
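The hw_qeit_calc() match above addresses a queue backed by a list of fixed-size pages: the byte offset is first wrapped around the queue length, then split into a page index (offset >> PAGESHIFT) and an offset within that page. A userspace rendering of the same calculation (page size and values are made up):

```c
/* Multi-page circular queue addressing: wrap, then page index + page offset. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define Q_PAGESHIFT 12
#define Q_PAGESIZE  (1u << Q_PAGESHIFT)

struct hw_queue {
	uint8_t **queue_pages;          /* array of page-sized buffers */
	size_t queue_length;            /* total size in bytes, page multiple */
};

static void *hw_qeit_calc(struct hw_queue *q, size_t q_offset)
{
	if (q_offset >= q->queue_length)        /* wrap around the ring */
		q_offset -= q->queue_length;
	uint8_t *page = q->queue_pages[q_offset >> Q_PAGESHIFT];
	return page + (q_offset & (Q_PAGESIZE - 1));
}

int main(void)
{
	uint8_t *pages[2] = { malloc(Q_PAGESIZE), malloc(Q_PAGESIZE) };
	struct hw_queue q = { .queue_pages = pages, .queue_length = 2 * Q_PAGESIZE };

	/* One page plus 8 bytes lands 8 bytes into the second page. */
	printf("%td\n", (uint8_t *)hw_qeit_calc(&q, Q_PAGESIZE + 8) - pages[1]);
	/* An offset past the end wraps back into the first page. */
	printf("%td\n", (uint8_t *)hw_qeit_calc(&q, 2 * Q_PAGESIZE + 4) - pages[0]);
	free(pages[0]);
	free(pages[1]);
	return 0;
}
```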
|
| /linux/drivers/net/wireless/ath/ath5k/ |
| H A D | qcu.c | 20 Queue Control Unit, DCF Control Unit Functions 31 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions 35 * basically we have 10 queues to play with. Each queue has a matching 36 * QCU that controls when the queue will get triggered and multiple QCUs 39 * and DCUs allowing us to have different DFS settings for each queue. 41 * When a frame goes into a TX queue, QCU decides when it'll trigger a 43 * it's buffer or -if it's a beacon queue- if it's time to fire up the queue 58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue 60 * @queue: One of enum ath5k_tx_queue_id 63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument [all …]
|