/linux/drivers/net/wireless/st/cw1200/queue.c

    /* O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers */
    #include "queue.h"

    static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
    {
        struct cw1200_queue_stats *stats = queue->stats;

        if (queue->tx_locked_cnt++ == 0) {
            pr_debug("[TX] Queue %d is locked.\n",
                     queue->queue_id);
            ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
        }
    }

    static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
    {
        struct cw1200_queue_stats *stats = queue->stats;
        ...
    [all …]
/linux/drivers/nvme/target/tcp.c

    /* ... queue before determining it to be idle. This optional module behavior ... */

    struct nvmet_tcp_queue *queue;              /* struct member */

    static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, ...)
    {
        if (unlikely(!queue->nr_cmds)) {
            ...
        }
        return cmd - queue->cmds;
    }

    nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
    {
        ...
        cmd = list_first_entry_or_null(&queue->free_list, ...);
        ...
    }

    /* in nvmet_tcp_put_cmd(): */
    if (unlikely(cmd == &cmd->queue->connect))
        ...
    list_add_tail(&cmd->entry, &cmd->queue->free_list);

    static inline int queue_cpu(struct nvmet_tcp_queue *queue)
    [all …]
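nvmet_tcp_cmd_tag() above recovers a command's tag from its position in the queue's command array. The following is a standalone, hedged illustration of that pointer-difference idiom; the names are generic, not the nvmet types.

    #include <stdint.h>
    #include <stdio.h>

    struct cmd {
        int payload;
    };

    /* The tag is simply the element's index within the backing array. */
    static uint16_t cmd_tag(const struct cmd *cmds, const struct cmd *cmd)
    {
        return (uint16_t)(cmd - cmds);
    }

    int main(void)
    {
        struct cmd cmds[8] = { { 0 } };

        printf("%u\n", cmd_tag(cmds, &cmds[5]));    /* prints 5 */
        return 0;
    }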
/linux/drivers/nvme/target/rdma.c

    struct nvmet_rdma_queue *queue;             /* struct member (appears twice) */

    MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
    MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");

    static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

    nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
    {
        ...
        tag = sbitmap_get(&queue->rsp_tags);
        ...
        rsp = &queue->rsps[tag];
        ...
        ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, ...);
        ...
    }

    /* in nvmet_rdma_put_rsp(): */
    nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
    [all …]
/linux/drivers/net/wireless/broadcom/b43legacy/pio.c

    static void tx_start(struct b43legacy_pioqueue *queue)
    {
        b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, ...);
    }

    static void tx_octet(struct b43legacy_pioqueue *queue, ...)
    {
        if (queue->need_workarounds) {
            b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
            b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, ...);
        } else {
            b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, ...);
            b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
        }
    }

    static void tx_data(struct b43legacy_pioqueue *queue, ...)
    {
        ...
        if (queue->need_workarounds) {
        ...
    [all …]
/linux/drivers/md/dm-vdo/funnel-workqueue.c

    #include "funnel-queue.h"

    /* DOC: Work queue definition. */

    /* Name of just the work queue (e.g., "cpuQ12") */

    static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue)
    {
        return ((queue == NULL) ?
            NULL : container_of(queue, struct simple_work_queue, common));
    }

    static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue)
    {
        return ((queue == NULL) ?
            NULL :
            container_of(queue, struct round_robin_work_queue, common));
    }

    static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue)
    [all …]
/linux/drivers/md/dm-vdo/funnel-queue.c

    #include "funnel-queue.h"

    /* in vdo_make_funnel_queue(): */
    struct funnel_queue *queue;

    result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue);
    ...
    /*
     * Initialize the stub entry and put it in the queue, establishing the invariant that
     * queue->newest and queue->oldest are never null.
     */
    queue->stub.next = NULL;
    queue->newest = &queue->stub;
    queue->oldest = &queue->stub;

    *queue_ptr = queue;

    void vdo_free_funnel_queue(struct funnel_queue *queue)
    [all …]
/linux/drivers/md/dm-vdo/funnel-queue.h

    /*
     * A funnel queue is a simple (almost) lock-free queue that accepts entries from multiple threads
     * ...
     * mechanism to ensure that only one thread is consuming from the queue. If more than one thread
     * attempts to consume from the queue, the resulting behavior is undefined. Clients must not
     * directly access or manipulate the internals of the queue, which are only exposed for the purpose
     * ...
     * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
     * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
     * in the queue must all be the same so the client can derive their structure pointer from the
     * ...
     * soon as they are returned since this queue is not susceptible to the "ABA problem" present in
     * many lock-free data structures. The queue is dynamically allocated to ensure cache-line
     * ...
     * at which a preempted producer will prevent the consumers from seeing items added to the queue by
     * ...
     */
    [all …]
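The header above documents a multi-producer, single-consumer funnel queue whose entries live inside the caller's own records. Below is a minimal, self-contained sketch of the producer side written from that description using C11 atomics; the names mirror the header, but the code is illustrative, not the dm-vdo implementation (which uses the kernel's xchg/WRITE_ONCE primitives).

    #include <stdatomic.h>
    #include <stddef.h>

    struct funnel_queue_entry {
        _Atomic(struct funnel_queue_entry *) next;
    };

    struct funnel_queue {
        /* Producers swap themselves in at "newest"; only the single consumer
         * touches "oldest". The embedded stub keeps both pointers non-NULL. */
        _Atomic(struct funnel_queue_entry *) newest;
        struct funnel_queue_entry *oldest;
        struct funnel_queue_entry stub;
    };

    /* Safe to call from any number of producer threads concurrently. */
    static void funnel_queue_put(struct funnel_queue *queue,
                                 struct funnel_queue_entry *entry)
    {
        struct funnel_queue_entry *previous;

        atomic_store(&entry->next, NULL);
        /* Publish the entry as the new tail; the exchange is a full barrier,
         * so everything already stored into the record is visible first. */
        previous = atomic_exchange(&queue->newest, entry);
        /* Link the old tail to the new entry. A producer preempted between
         * these two statements opens the "window" the header warns about:
         * until this store lands, the consumer cannot walk past "previous". */
        atomic_store_explicit(&previous->next, entry, memory_order_release);
    }

The consumer side (not shown) walks from "oldest" and treats the stub specially, which is why entries can be recycled as soon as they are returned without ABA hazards.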
/linux/drivers/scsi/arm/queue.c

    /* linux/drivers/acorn/scsi/queue.c: queue handling primitives */

    #include "queue.h"

    /*
     * Function: void queue_initialise (Queue_t *queue)
     * Purpose : initialise a queue
     * Params  : queue - queue to initialise
     */
    int queue_initialise (Queue_t *queue)
    {
        ...
        spin_lock_init(&queue->queue_lock);
        INIT_LIST_HEAD(&queue->head);
        INIT_LIST_HEAD(&queue->free);
        ...
        queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
        ...
    [all …]
/linux/drivers/scsi/arm/queue.h

    /* linux/drivers/acorn/scsi/queue.h: queue handling */

    /*
     * Function: void queue_initialise (Queue_t *queue)
     * Purpose : initialise a queue
     * Params  : queue - queue to initialise
     */
    extern int queue_initialise (Queue_t *queue);

    /*
     * Function: void queue_free (Queue_t *queue)
     * Purpose : free a queue
     * Params  : queue - queue to free
     */
    extern void queue_free (Queue_t *queue);

    /*
     * Function: struct scsi_cmnd *queue_remove (queue)
     * ...
    [all …]
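The prototypes above are enough to show the intended call pattern. The following is only a usage sketch inferred from those doc comments; the error value and the assumption that queue_initialise() reports success with a nonzero return are not taken from any real host driver.

    #include "queue.h"      /* Queue_t, queue_initialise(), queue_remove(), queue_free() */

    static int example_queue_use(Queue_t *queue)
    {
        struct scsi_cmnd *cmd;

        if (!queue_initialise(queue))   /* assumed: nonzero on success */
            return -ENOMEM;

        /* ... commands are added to the queue elsewhere by the driver ... */

        while ((cmd = queue_remove(queue)) != NULL) {
            /* hand each command to the host adapter in FIFO order */
        }

        queue_free(queue);
        return 0;
    }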
/linux/drivers/net/xen-netback/rx.c

    static void xenvif_update_needed_slots(struct xenvif_queue *queue, ...)
    {
        ...
        WRITE_ONCE(queue->rx_slots_needed, needed);
    }

    static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
    {
        ...
        needed = READ_ONCE(queue->rx_slots_needed);
        ...
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
        ...
        queue->rx.sring->req_event = prod + 1;
        ...
        } while (queue->rx.sring->req_prod != prod);
        ...
    }

    bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
    {
        ...
        spin_lock_irqsave(&queue->rx_queue.lock, flags);
    [all …]
/linux/drivers/net/xen-netback/interface.c

    /* Number of bytes allowed on the internal guest Rx queue. */

    void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, ...)
    {
        ...
        atomic_inc(&queue->inflight_packets);
    }

    void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
    {
        atomic_dec(&queue->inflight_packets);
        ...
        wake_up(&queue->dealloc_wq);
    }

    static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
    {
        ...
        rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
        ...
        napi_schedule(&queue->napi);
        ...
    }

    /* in xenvif_tx_interrupt(): */
    struct xenvif_queue *queue = dev_id;
    [all …]
/linux/drivers/net/xen-netback/netback.c

    /* The time that packets can stay on the guest Rx internal queue ... */

    static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, ...);
    static void make_tx_response(struct xenvif_queue *queue, ...);
    static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
    static inline int tx_work_todo(struct xenvif_queue *queue);

    static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, ...)
    {
        return page_to_pfn(queue->mmap_pages[idx]);
    }

    static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, ...)
    {
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
    }

    void xenvif_kick_thread(struct xenvif_queue *queue)
    [all …]
/linux/drivers/gpu/drm/imagination/pvr_queue.c

    /* in get_ctx_state_size(): */
    WARN(1, "Invalid queue type");

    /* in pvr_queue_fence_release_work(): */
    pvr_context_put(fence->queue->ctx);

    /* in pvr_queue_fence_release(): */
    struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;

    /* in pvr_queue_job_fence_get_timeline_name(): */
    switch (fence->queue->type) {
    ...
    WARN(1, "Invalid queue type");

    /* in pvr_queue_cccb_fence_get_timeline_name(): */
    switch (fence->queue->type) {
    ...
    WARN(1, "Invalid queue type");

    /*
     * @queue: The queue this fence belongs to.
     * ...
     * pvr_queue_fence::queue field too.
     */
    /* in pvr_queue_fence_init(): */
    struct pvr_queue *queue,
    [all …]
/linux/fs/fuse/dev_uring.c

    static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
    {
        struct fuse_ring *ring = queue->ring;
        ...
        lockdep_assert_held(&queue->lock);
        ...
        /*
         * Allow one bg request per queue, ignoring global fc limits.
         * This prevents a single queue from consuming all resources and
         * eliminates the need for remote queue wake-ups when global
         * limits are met but this queue has no more waiting requests.
         */
        ...
             !queue->active_background) &&
            (!list_empty(&queue->fuse_req_bg_queue))) {
            ...
            req = list_first_entry(&queue->fuse_req_bg_queue, ...);
    [all …]
/linux/drivers/md/dm-vdo/indexer/funnel-requestqueue.c

    #include "funnel-queue.h"

    /*
     * This queue will attempt to handle requests in reasonably sized batches instead of reacting
     * ...
     * If the wait time becomes long enough, the queue will become dormant and must be explicitly
     * ...
     * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
     * ...
     * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
     * ...
     * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
     * ...
     * the queue to awaken immediately.
     */

    /* Wait queue for synchronizing producers and consumer */
    ...
    /* Queue of new incoming requests */
    ...
    /* Queue of old requests to retry */
    [all …]
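The comment block above describes the producer/consumer hand-off: enqueue through the funnel queue (whose xchg doubles as a memory barrier), then pay for a wakeup only if the consumer has marked itself dormant. Here is a pthread-based sketch of that pattern, reusing the funnel_queue_put() sketch shown earlier; the type and field names are illustrative, not the indexer's.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct request_queue_sketch {
        struct funnel_queue *incoming;  /* lock-free queue of new requests */
        atomic_bool dormant;            /* set by the consumer when it idles */
        pthread_mutex_t lock;
        pthread_cond_t wakeup;
    };

    /* Producer path: the enqueue itself is cheap; the expensive signal is
     * only issued when the consumer has gone dormant. */
    static void request_queue_enqueue(struct request_queue_sketch *rq,
                                      struct funnel_queue_entry *entry)
    {
        /* The atomic exchange inside the put makes the entry visible before
         * the dormant flag is read, as the comment above requires. */
        funnel_queue_put(rq->incoming, entry);

        if (atomic_load(&rq->dormant)) {
            pthread_mutex_lock(&rq->lock);
            pthread_cond_signal(&rq->wakeup);
            pthread_mutex_unlock(&rq->lock);
        }
    }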
/linux/drivers/net/xen-netfront.c

    /* Queue name is interface name with "-qNNN" appended */
    /* IRQ name is queue name with "-tx" or "-rx" appended */

    unsigned int id;                    /* Queue ID, 0-based */

    /* Multi-queue support */

    static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, ...)
    {
        ...
        struct sk_buff *skb = queue->rx_skbs[i];
        queue->rx_skbs[i] = NULL;
        ...
    }

    static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, ...)
    {
        ...
        grant_ref_t ref = queue->grant_rx_ref[i];
        queue->grant_rx_ref[i] = INVALID_GRANT_REF;
    [all …]
/linux/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c

    Abstract: rt2x00 queue specific routines.

    /* in rt2x00queue_alloc_rxskb(): */
    struct data_queue *queue = entry->queue;
    struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
    ...
    frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

    /* in rt2x00queue_map_txskb() and rt2x00queue_unmap_skb(): */
    struct device *dev = entry->queue->rt2x00dev->dev;

    /* in rt2x00queue_write_tx_data(): */
    struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    ...
    /* ... a queue corruption! */
    ...
    "Corrupt queue %d, accessing entry which is not ours\n"
    ...
    entry->queue->qid, DRV_PROJECT);
    [all …]
/linux/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h

    Abstract: rt2x00 queue datastructures and routines

    /*
     * enum data_queue_qid: Queue identification
     *
     * @QID_AC_VO: AC VO queue
     * @QID_AC_VI: AC VI queue
     * @QID_AC_BE: AC BE queue
     * @QID_AC_BK: AC BK queue
     * @QID_HCCA: HCCA queue
     * @QID_MGMT: MGMT queue (prio queue)
     * @QID_RX: RX queue
     * ...
     * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
     */
    [all …]
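The kernel-doc above enumerates the queue identifiers. As a sketch only, a corresponding enum might look like the following; the constants are left implicit because the documentation fixes their meaning, not their numeric values, and the real driver assigns explicit values.

    enum data_queue_qid_sketch {
        QID_AC_VO,      /* AC VO queue */
        QID_AC_VI,      /* AC VI queue */
        QID_AC_BE,      /* AC BE queue */
        QID_AC_BK,      /* AC BK queue */
        QID_HCCA,       /* HCCA queue */
        QID_MGMT,       /* MGMT queue (prio queue) */
        QID_RX,         /* RX queue */
        QID_BEACON,     /* Beacon queue (value unspecified, don't send it to device) */
    };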
/linux/drivers/net/wireguard/queueing.c

    int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ...)
    {
        ...
        memset(queue, 0, sizeof(*queue));
        queue->last_cpu = -1;
        ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
        ...
        queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
        if (!queue->worker) {
            ptr_ring_cleanup(&queue->ring, NULL);
            ...
        }
    }

    void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
    {
        free_percpu(queue->worker);
        WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
    [all …]
/linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h

    /*
     * WQE  - Work Queue Entry
     * SWQE - Send Work Queue Entry
     * RWQE - Receive Work Queue Entry
     * CQE  - Completion Queue Entry
     * EQE  - Event Queue Entry
     */

    static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
    {
        ...
        if (q_offset >= queue->queue_length)
            q_offset -= queue->queue_length;
        current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
        ...
    }

    static inline void *hw_qeit_get(struct hw_queue *queue)
    [all …]
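hw_qeit_calc() above wraps the byte offset around the queue length and then selects the backing page by shifting with EHEA_PAGESHIFT. Below is a worked example of that mapping, assuming 4 KiB queue pages; the constant names and values are illustrative, not taken from ehea_qmr.h.

    #include <stdio.h>

    #define EXAMPLE_PAGESHIFT 12                    /* assumed 4 KiB queue pages */
    #define EXAMPLE_PAGESIZE  (1UL << EXAMPLE_PAGESHIFT)

    int main(void)
    {
        unsigned long q_offset = 0x3050;            /* byte offset into the queue */

        printf("page index:     %lu\n", q_offset >> EXAMPLE_PAGESHIFT);        /* 3 */
        printf("offset in page: 0x%lx\n", q_offset & (EXAMPLE_PAGESIZE - 1));  /* 0x50 */
        return 0;
    }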
/linux/net/sunrpc/sched.c

    /* ... queue->lock and bh_disabled in order to avoid races within ... */

    __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
    {
        ...
        if (list_empty(&queue->timer_list.list))
            cancel_delayed_work(&queue->timer_list.dwork);
    }

    rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
    {
        ...
        queue->timer_list.expires = expires;
        ...
        mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
    }

    __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, ...)
    {
        ...
        if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
            rpc_set_queue_timer(queue, timeout);
    [all …]
/linux/drivers/net/wireless/ath/ath5k/qcu.c

    Queue Control Unit, DCF Control Unit Functions

    /*
     * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
     *
     * ...
     * basically we have 10 queues to play with. Each queue has a matching
     * QCU that controls when the queue will get triggered and multiple QCUs
     * ...
     * and DCUs allowing us to have different DFS settings for each queue.
     *
     * When a frame goes into a TX queue, QCU decides when it'll trigger a
     * ...
     * it's buffer or -if it's a beacon queue- if it's time to fire up the queue
     * ...
     */

    /*
     * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
     * ...
     * @queue: One of enum ath5k_tx_queue_id
     */
    ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
    [all …]
/linux/drivers/net/wireless/ath/ath5k/dma.c

    /* ... handle queue setup for 5210 chipset (rest are handled on qcu.c). */

    /*
     * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
     * ...
     * @queue: The hw queue number
     *
     * Start DMA transmit for a specific queue and since 5210 doesn't have
     * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
     * queue for normal data and one queue for beacons). For queue setup
     * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
     * of range or if queue is already disabled.
     * ...
     * queue (see below).
     */
    ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
    [all …]
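A small usage sketch of the two ath5k helpers documented above; the signatures follow the kernel-doc, while the wrapping function, error handling, and debug print are assumptions for illustration only.

    static int example_kick_tx_queue(struct ath5k_hw *ah, unsigned int queue)
    {
        int ret;

        ret = ath5k_hw_start_tx_dma(ah, queue);
        if (ret)        /* -EINVAL: queue out of range or already disabled */
            return ret;

        pr_debug("queue %u has %u frames pending\n", queue,
                 ath5k_hw_num_tx_pending(ah, queue));
        return 0;
    }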
/linux/drivers/nvme/host/rdma.c

    struct nvme_rdma_queue *queue;              /* struct member */

    static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
    {
        return queue - queue->ctrl->queues;
    }

    static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
    {
        return nvme_rdma_queue_idx(queue) >
            queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
            queue->ctrl->io_queues[HCTX_TYPE_READ];
    }

    static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
    {
        return queue->cmnd_capsule_len - sizeof(struct nvme_command);
    }

    /* in nvme_rdma_alloc_ring(): */
    /* Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue ... */
    [all …]
/linux/Documentation/devicetree/bindings/net/intel,ixp4xx-hss.yaml

    # ... Processing Engine) and the IXP4xx Queue Manager to process ...

    intel,queue-chl-rxtrig:
      ...
        - description: phandle to the RX trigger queue on the NPE
        - description: the queue instance number
      description: phandle to the RX trigger queue on the NPE

    intel,queue-chl-txready:
      ...
        - description: phandle to the TX ready queue on the NPE
        - description: the queue instance number
      description: phandle to the TX ready queue on the NPE

    intel,queue-pkt-rx:
    [all …]