/linux/drivers/md/dm-vdo/

funnel-workqueue.c:
     73  static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue)
     75          return ((queue == NULL) ?
     76                  NULL : container_of(queue, struct simple_work_queue, common));
     79  static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue)
     81          return ((queue == NULL) ?
     83                  container_of(queue, struct round_robin_work_queue, common));
     96  static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue)
    100          for (i = queue->common.type->max_priority; i >= 0; i--) {
    101                  struct funnel_queue_entry *link = vdo_funnel_queue_poll(queue->priority_lists[i]);
    110  static void enqueue_work_queue_completion(struct simple_work_queue *queue,
    [all …]

funnel-queue.c:
     15          struct funnel_queue *queue;
     17          result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue);
     25          queue->stub.next = NULL;
     26          queue->newest = &queue->stub;
     27          queue->oldest = &queue->stub;
     29          *queue_ptr = queue;
     33  void vdo_free_funnel_queue(struct funnel_queue *queue)
     35          vdo_free(queue);
     38  static struct funnel_queue_entry *get_oldest(struct funnel_queue *queue)
     45          struct funnel_queue_entry *oldest = queue->oldest;
    [all …]
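
The funnel-queue.c fragment shows the stub-node initialization that is characteristic of an intrusive multi-producer, single-consumer list: newest and oldest both start out pointing at an embedded stub entry. Below is a minimal user-space sketch of that general design using C11 atomics; it is an illustration of the technique, not the VDO code, and the fq_* names are hypothetical.

#include <stdatomic.h>
#include <stddef.h>

struct fq_entry {
	_Atomic(struct fq_entry *) next;
};

struct fq {
	_Atomic(struct fq_entry *) newest;   /* producers swing this atomically */
	struct fq_entry *oldest;             /* consumer-private cursor */
	struct fq_entry stub;                /* keeps the list non-empty */
};

static void fq_init(struct fq *q)
{
	atomic_store(&q->stub.next, NULL);
	atomic_store(&q->newest, &q->stub);
	q->oldest = &q->stub;
}

/* Any thread may enqueue: claim the tail with one exchange, then link it. */
static void fq_put(struct fq *q, struct fq_entry *e)
{
	atomic_store(&e->next, NULL);
	struct fq_entry *prev = atomic_exchange(&q->newest, e);
	atomic_store(&prev->next, e);
}

/* Only the single consumer may call this; returns NULL when empty or when a
 * producer is between its exchange and its link store. */
static struct fq_entry *fq_poll(struct fq *q)
{
	struct fq_entry *oldest = q->oldest;
	struct fq_entry *next = atomic_load(&oldest->next);

	if (oldest == &q->stub) {
		if (!next)
			return NULL;                 /* queue is empty */
		q->oldest = next;                    /* skip over the stub */
		oldest = next;
		next = atomic_load(&oldest->next);
	}
	if (!next) {
		/* oldest may be the last real entry: re-append the stub so it
		 * gains a successor, then look again. */
		if (atomic_load(&q->newest) != oldest)
			return NULL;                 /* producer mid-insert */
		fq_put(q, &q->stub);
		next = atomic_load(&oldest->next);
		if (!next)
			return NULL;
	}
	q->oldest = next;
	return oldest;
}

Re-appending the stub when the consumer reaches the last real entry is what lets producers get away with a single atomic exchange and no further coordination with the consumer.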

/linux/drivers/net/wireless/st/cw1200/

queue.c:
     27  static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
     29          struct cw1200_queue_stats *stats = queue->stats;
     30          if (queue->tx_locked_cnt++ == 0) {
     32                          queue->queue_id);
     33                  ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
     37  static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
     39          struct cw1200_queue_stats *stats = queue->stats;
     40          BUG_ON(!queue->tx_locked_cnt);
     41          if (--queue->tx_locked_cnt == 0) {
     43                          queue->queue_id);
    [all …]

/linux/drivers/net/wireless/broadcom/b43legacy/

pio.c:
     22  static void tx_start(struct b43legacy_pioqueue *queue)
     24          b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
     28  static void tx_octet(struct b43legacy_pioqueue *queue,
     31          if (queue->need_workarounds) {
     32                  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
     33                  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
     36                  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
     38                  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
     63  static void tx_data(struct b43legacy_pioqueue *queue,
     71          if (queue->need_workarounds) {
    [all …]

/linux/drivers/iio/buffer/

industrialio-buffer-dma.c:
    101          struct iio_dma_buffer_queue *queue = block->queue;
    106          dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
    109          atomic_dec(&queue->num_dmabufs);
    112          iio_buffer_put(&queue->buffer);
    175                  struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
    184          block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
    195          block->queue = queue;
    199          iio_buffer_get(&queue->buffer);
    202          atomic_inc(&queue->num_dmabufs);
    213  static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
    [all …]

/linux/drivers/net/xen-netback/

rx.c:
     42  static void xenvif_update_needed_slots(struct xenvif_queue *queue,
     55          WRITE_ONCE(queue->rx_slots_needed, needed);
     58  static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
     63          needed = READ_ONCE(queue->rx_slots_needed);
     68                  prod = queue->rx.sring->req_prod;
     69                  cons = queue->rx.req_cons;
     74                  queue->rx.sring->req_event = prod + 1;
     80          } while (queue->rx.sring->req_prod != prod);
     85  bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
     90          spin_lock_irqsave(&queue->rx_queue.lock, flags);
    [all …]
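
xenvif_rx_ring_slots_available() compares the shared ring's producer index against the consumer index to decide whether enough request slots are outstanding, and the re-check loop that writes req_event = prod + 1 is how the backend asks to be notified once the frontend posts more requests. A small user-space sketch of that "check, arm event, re-check" pattern, assuming free-running indices where unsigned wraparound keeps prod - cons correct; the names and the volatile shared fields are illustrative, not the Xen ring API.

#include <stdbool.h>
#include <stdint.h>

/* shared_prod and shared_event stand in for fields the other side updates. */
static bool rx_slots_available(volatile uint32_t *shared_prod,
			       volatile uint32_t *shared_event,
			       uint32_t cons, uint32_t needed)
{
	uint32_t prod;

	do {
		prod = *shared_prod;
		if ((uint32_t)(prod - cons) >= needed)
			return true;

		/* Ask the producer to notify us once it moves past prod. */
		*shared_event = prod + 1;

		/* A real implementation issues a memory barrier here before
		 * re-reading the producer index. */
	} while (*shared_prod != prod);

	return false;
}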

netback.c:
    107  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
    110  static void make_tx_response(struct xenvif_queue *queue,
    115  static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
    117  static inline int tx_work_todo(struct xenvif_queue *queue);
    119  static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
    122          return page_to_pfn(queue->mmap_pages[idx]);
    125  static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
    128          return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
    161  void xenvif_kick_thread(struct xenvif_queue *queue)
    163          wake_up(&queue->wq);
    [all …]

interface.c:
     52  void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
     56          atomic_inc(&queue->inflight_packets);
     59  void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
     61          atomic_dec(&queue->inflight_packets);
     67          wake_up(&queue->dealloc_wq);
     77  static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
     81          rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
     83                  napi_schedule(&queue->napi);
     89          struct xenvif_queue *queue = dev_id;
     92          old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
    [all …]

/linux/drivers/nvme/target/

tcp.c:
    109          struct nvmet_tcp_queue *queue;
    218  static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
    221          if (unlikely(!queue->nr_cmds)) {
    226          return cmd - queue->cmds;
    254  nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
    258          cmd = list_first_entry_or_null(&queue->free_list,
    274          if (unlikely(cmd == &cmd->queue->connect))
    277          list_add_tail(&cmd->entry, &cmd->queue->free_list);
    280  static inline int queue_cpu(struct nvmet_tcp_queue *queue)
    282          return queue->sock->sk->sk_incoming_cpu;
    [all …]

rdma.c:
     52          struct nvmet_rdma_queue *queue;
     66          struct nvmet_rdma_queue *queue;
    172  static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
    213  nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
    218          tag = sbitmap_get(&queue->rsp_tags);
    220                  rsp = &queue->rsps[tag];
    228                  ret = nvmet_rdma_alloc_rsp(queue->dev, rsp,
    243                  nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
    248                  sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag);
    448  nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
    [all …]

/linux/drivers/md/dm-vdo/indexer/

funnel-requestqueue.c:
     68  static inline struct uds_request *poll_queues(struct uds_request_queue *queue)
     72          entry = vdo_funnel_queue_poll(queue->retry_queue);
     76          entry = vdo_funnel_queue_poll(queue->main_queue);
     83  static inline bool are_queues_idle(struct uds_request_queue *queue)
     85          return vdo_is_funnel_queue_idle(queue->retry_queue) &&
     86                 vdo_is_funnel_queue_idle(queue->main_queue);
     94  static inline bool dequeue_request(struct uds_request_queue *queue,
     97          struct uds_request *request = poll_queues(queue);
    104          if (!READ_ONCE(queue->running)) {
    115  static void wait_for_request(struct uds_request_queue *queue, bool dormant,
    [all …]

/linux/fs/fuse/

dev_uring.c:
     51  static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
     53          struct fuse_ring *ring = queue->ring;
     56          lockdep_assert_held(&queue->lock);
     66                  !queue->active_background) &&
     67                 (!list_empty(&queue->fuse_req_bg_queue))) {
     70                  req = list_first_entry(&queue->fuse_req_bg_queue,
     73                  queue->active_background++;
     75                  list_move_tail(&req->list, &queue->fuse_req_queue);
     82          struct fuse_ring_queue *queue = ent->queue;
     83          struct fuse_ring *ring = queue->ring;
    [all …]

/linux/drivers/nvme/host/

tcp.c:
    106          struct nvme_tcp_queue *queue;
    207  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
    214  static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
    216          return queue - queue->ctrl->queues;
    235  static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
    240          return queue->tls_enabled;
    254  static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
    256          u32 queue_idx = nvme_tcp_queue_id(queue);
    259                  return queue->ctrl->admin_tag_set.tags[queue_idx];
    260          return queue->ctrl->tag_set.tags[queue_idx - 1];
    [all …]
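
nvme_tcp_queue_id() derives a queue's index purely from pointer arithmetic: subtracting the start of the controller's queue array from the queue's own address yields its position, and nvmet_tcp_cmd_tag() above does the same with cmd - queue->cmds. A tiny self-contained example of that idiom; the struct names here are illustrative only.

#include <stddef.h>
#include <stdio.h>

struct queue { int dummy; };

struct ctrl {
	struct queue queues[8];	/* index 0 plays the "admin queue" role */
};

/* Element address minus array start gives the element's index. */
static ptrdiff_t queue_id(const struct ctrl *ctrl, const struct queue *q)
{
	return q - ctrl->queues;
}

int main(void)
{
	struct ctrl c;

	printf("%td\n", queue_id(&c, &c.queues[3]));	/* prints 3 */
	return 0;
}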

/linux/drivers/net/wireless/ralink/rt2x00/

rt2x00queue.c:
     25          struct data_queue *queue = entry->queue;
     26          struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
     37          frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
     95          struct device *dev = entry->queue->rt2x00dev->dev;
    112          struct device *dev = entry->queue->rt2x00dev->dev;
    488          struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    500                    entry->queue->qid, DRV_PROJECT);
    529          struct data_queue *queue = entry->queue;
    531          queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
    537                  rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
    [all …]

rt2x00mmio.c:
     51          struct data_queue *queue = rt2x00dev->rx;
     58                  entry = rt2x00queue_get_entry(queue, Q_INDEX);
     69                  skbdesc->desc_len = entry->queue->desc_size;
     88  void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
     92          for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
    101                                     struct data_queue *queue)
    112                                    queue->limit * queue->desc_size, &dma,
    120          for (i = 0; i < queue->limit; i++) {
    121                  entry_priv = queue->entries[i].priv_data;
    122                  entry_priv->desc = addr + i * queue->desc_size;
    [all …]

/linux/drivers/net/wireguard/

queueing.c:
     25  int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
     30          memset(queue, 0, sizeof(*queue));
     31          queue->last_cpu = -1;
     32          ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
     35          queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
     36          if (!queue->worker) {
     37                  ptr_ring_cleanup(&queue->ring, NULL);
     43  void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
     45          free_percpu(queue->worker);
     46          WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
    [all …]

/linux/drivers/gpu/drm/imagination/

pvr_queue.c:
    116          pvr_context_put(fence->queue->ctx);
    123          struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
    133          switch (fence->queue->type) {
    156          switch (fence->queue->type) {
    271                        struct pvr_queue *queue,
    277          pvr_context_get(queue->ctx);
    278          fence->queue = queue;
    296  pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
    298          pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops,
    299                               &queue->cccb_fence_ctx.base);
    [all …]

/linux/drivers/scsi/arm/

queue.c:
     59  int queue_initialise (Queue_t *queue)
     64          spin_lock_init(&queue->queue_lock);
     65          INIT_LIST_HEAD(&queue->head);
     66          INIT_LIST_HEAD(&queue->free);
     74          queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
     79                          list_add(&q->list, &queue->free);
     83          return queue->alloc != NULL;
     91  void queue_free (Queue_t *queue)
     93          if (!list_empty(&queue->head))
     94                  printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
    [all …]
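
queue_initialise() pre-allocates all queue entries in one kmalloc_array() call and threads every element onto a free list; entries are later moved between the free list and the active list under the queue lock, so the steady state never allocates. A user-space sketch of that pre-allocated free-list pattern, using a plain singly linked list instead of the kernel's list_head; function and type names here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	void *payload;
};

struct queue {
	struct entry *free;	/* pre-allocated, currently unused entries */
	struct entry *alloc;	/* single backing allocation, kept for freeing */
};

/* Allocate every entry up front and push each one onto the free list.
 * Returns false if the backing allocation fails. */
static bool q_init(struct queue *q, size_t nentries)
{
	q->free = NULL;
	q->alloc = calloc(nentries, sizeof(*q->alloc));
	if (!q->alloc)
		return false;

	for (size_t i = 0; i < nentries; i++) {
		q->alloc[i].next = q->free;
		q->free = &q->alloc[i];
	}
	return true;
}

static void q_destroy(struct queue *q)
{
	free(q->alloc);
	q->alloc = NULL;
	q->free = NULL;
}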

/linux/drivers/net/ethernet/ibm/ehea/

ehea_qmr.h:
    196  static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
    200          if (q_offset >= queue->queue_length)
    201                  q_offset -= queue->queue_length;
    202          current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
    206  static inline void *hw_qeit_get(struct hw_queue *queue)
    208          return hw_qeit_calc(queue, queue->current_q_offset);
    211  static inline void hw_qeit_inc(struct hw_queue *queue)
    213          queue->current_q_offset += queue->qe_size;
    214          if (queue->current_q_offset >= queue->queue_length) {
    215                  queue->current_q_offset = 0;
    [all …]
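
The ehea helpers walk a hardware queue that is stored as an array of pages: hw_qeit_calc() wraps a byte offset at queue_length and indexes queue_pages by offset >> EHEA_PAGESHIFT, while hw_qeit_inc() advances the cursor by one element and wraps it to zero. A compact sketch of the same offset-to-page arithmetic, assuming a 4 KiB page shift; struct and function names are hypothetical.

#include <stdint.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_MASK	((1u << PAGE_SHIFT) - 1)

struct hwq {
	void   **pages;		/* array of page-sized buffers */
	uint64_t length;	/* total queue length in bytes */
	uint64_t offset;	/* current byte offset into the queue */
	uint32_t qe_size;	/* size of one queue element */
};

/* Translate a (possibly just-wrapped) byte offset into an element address. */
static void *qeit_calc(struct hwq *q, uint64_t off)
{
	if (off >= q->length)
		off -= q->length;
	uint8_t *page = q->pages[off >> PAGE_SHIFT];
	return page + (off & PAGE_MASK);
}

static void *qeit_get(struct hwq *q)
{
	return qeit_calc(q, q->offset);
}

/* Advance to the next element, wrapping to the start of the queue. */
static void qeit_inc(struct hwq *q)
{
	q->offset += q->qe_size;
	if (q->offset >= q->length)
		q->offset = 0;
}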

/linux/net/sunrpc/

sched.c:
     95  __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
    101          if (list_empty(&queue->timer_list.list))
    102                  cancel_delayed_work(&queue->timer_list.dwork);
    106  rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
    109          queue->timer_list.expires = expires;
    114          mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
    121  __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
    125          if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
    126                  rpc_set_queue_timer(queue, timeout);
    127          list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
    [all …]

/linux/drivers/net/wireless/ath/ath5k/

qcu.c:
     63  ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
     66          AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
     69          if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
     76          pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
     82          if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
     94  ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
     96          if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
    100          ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
    102          AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
    138  ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
    [all …]

/linux/include/drm/

spsc_queue.h:
     48  static inline void spsc_queue_init(struct spsc_queue *queue)
     50          queue->head = NULL;
     51          atomic_long_set(&queue->tail, (long)&queue->head);
     52          atomic_set(&queue->job_count, 0);
     55  static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
     57          return queue->head;
     60  static inline int spsc_queue_count(struct spsc_queue *queue)
     62          return atomic_read(&queue->job_count);
     65  static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
     73          atomic_inc(&queue->job_count);
    [all …]
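
The spsc_queue.h snippet shows a linked queue whose tail is an atomic pointer-to-pointer: when the queue is empty, tail points at the head field itself, so a push only has to exchange the tail and then store the new node through whatever slot it claimed; job_count gives a cheap length query. A user-space C11 sketch of just the operations visible above (init, peek, count, push); it omits kernel details such as the preempt_disable() the DRM header uses, and all names here are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct spsc {
	struct node *head;		/* consumer end of the list */
	_Atomic(struct node **) tail;	/* address of the last "next" slot to fill */
	atomic_int count;
};

static void spsc_init(struct spsc *q)
{
	q->head = NULL;
	/* Empty queue: the next push must write straight into q->head. */
	atomic_store(&q->tail, &q->head);
	atomic_store(&q->count, 0);
}

static struct node *spsc_peek(struct spsc *q)
{
	return q->head;
}

static int spsc_count(struct spsc *q)
{
	return atomic_load(&q->count);
}

/* Push: atomically claim the current tail slot, then link the new node into
 * it; returns true when the queue was empty before this push. */
static bool spsc_push(struct spsc *q, struct node *n)
{
	n->next = NULL;
	struct node **tail = atomic_exchange(&q->tail, &n->next);
	*tail = n;
	atomic_fetch_add(&q->count, 1);
	return tail == &q->head;
}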

/linux/arch/mips/cavium-octeon/executive/

cvmx-pko.c:
     70          int queue;
     76          for (queue = 0; queue < num_queues; queue++) {
     82                  config.s.index = queue;
     83                  config.s.qid = base_queue + queue;
     85                  config.s.tail = (queue == (num_queues - 1));
     86                  config.s.s_tail = (queue == static_priority_end);
     88                  config.s.static_q = (queue <= static_priority_end);
     92                          CVMX_CMD_QUEUE_PKO(base_queue + queue),
    101                                 num_queues, queue);
    104                          CVMX_CMD_QUEUE_PKO(base_queue + queue));
    [all …]
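
The configuration loop marks the last queue of the port as the ring tail, flags every queue up to static_priority_end as statically prioritized, and sets s_tail only on that boundary queue. A worked example of the flag arithmetic, using assumed values (num_queues = 4, static_priority_end = 1) rather than any real PKO configuration:

#include <stdio.h>

int main(void)
{
	/* Assumed example values, not taken from a real PKO setup. */
	int num_queues = 4, static_priority_end = 1;

	for (int queue = 0; queue < num_queues; queue++) {
		int tail     = (queue == num_queues - 1);
		int s_tail   = (queue == static_priority_end);
		int static_q = (queue <= static_priority_end);

		printf("queue %d: static_q=%d s_tail=%d tail=%d\n",
		       queue, static_q, s_tail, tail);
	}
	return 0;
}

With these values, queues 0 and 1 are static-priority, queue 1 carries the s_tail marker, and queue 3 is the ring tail.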

/linux/drivers/gpu/drm/amd/amdgpu/

mes_userqueue.c:
     63                                          struct amdgpu_usermode_queue *queue,
     68          struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
     71          wptr_vm = queue->vm;
     96          queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
    116                          struct amdgpu_usermode_queue *queue)
    119          struct amdgpu_userq_obj *ctx = &queue->fw_obj;
    120          struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
    137          queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
    139          queue_input.process_id = queue->vm->pasid;
    140          queue_input.queue_type = queue->queue_type;
    [all …]

/linux/drivers/net/

eql.c:
    142  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
    149          spin_lock(&eql->queue.lock);
    150          head = &eql->queue.all_slaves;
    159                          eql_kill_one_slave(&eql->queue, slave);
    163          spin_unlock(&eql->queue.lock);
    186          spin_lock_init(&eql->queue.lock);
    187          INIT_LIST_HEAD(&eql->queue.all_slaves);
    188          eql->queue.master_dev = dev;
    213          BUG_ON(!list_empty(&eql->queue.all_slaves));
    223  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
    [all …]