/linux/drivers/net/ethernet/intel/idpf/

idpf_controlq.c (matched lines from idpf_ctlq_setup_regs() and idpf_ctlq_init_regs()):

    static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
        cq->reg.head = q_create_info->reg.head;
        cq->reg.tail = q_create_info->reg.tail;
        cq->reg.len = q_create_info->reg.len;
        cq->reg.bah = q_create_info->reg.bah;
        cq->reg.bal = q_create_info->reg.bal;
        cq->reg.len_mask = q_create_info->reg.len_mask;
        cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
        cq->reg.head_mask = q_create_info->reg.head_mask;
    ...
    static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
    ...
|
idpf_controlq_setup.c (matched lines from idpf_ctlq_alloc_desc_ring() and idpf_ctlq_alloc_bufs()):

    ... struct idpf_ctlq_info *cq)
        size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
        cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
        if (!cq->desc_ring.va)
    ...
    ... struct idpf_ctlq_info *cq)
        if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
        cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
        if (!cq->bi.rx_buff)
        for (i = 0; i < cq->ring_size - 1; i++) {
            cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
    ...
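
Both allocators above follow the usual control-queue bring-up pattern: one contiguous DMA descriptor ring sized as ring_size * sizeof(desc), plus, for a receive-style queue, an array of per-descriptor buffer records, with everything unwound if any step fails. The userspace sketch below models only that shape; plain calloc() stands in for idpf_alloc_dma_mem()/kcalloc(), and the names (ctlq_alloc, struct dma_buf) are hypothetical, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the driver's descriptor and buffer records. */
    struct ctlq_desc { unsigned char raw[64]; };
    struct dma_buf   { void *va; size_t size; };

    struct ctlq {
        size_t ring_size;
        struct ctlq_desc *desc_ring;   /* would be DMA memory in the driver */
        struct dma_buf  **rx_buff;     /* one buffer record per descriptor  */
    };

    static int ctlq_alloc(struct ctlq *cq, size_t ring_size, size_t buf_size)
    {
        size_t i;

        cq->ring_size = ring_size;
        cq->desc_ring = calloc(ring_size, sizeof(*cq->desc_ring));
        if (!cq->desc_ring)
            return -1;

        cq->rx_buff = calloc(ring_size, sizeof(*cq->rx_buff));
        if (!cq->rx_buff)
            goto err_ring;

        /* idpf allocates ring_size - 1 buffers; the last slot stays empty. */
        for (i = 0; i < ring_size - 1; i++) {
            cq->rx_buff[i] = calloc(1, sizeof(struct dma_buf));
            if (!cq->rx_buff[i])
                goto err_bufs;
            cq->rx_buff[i]->size = buf_size;
            cq->rx_buff[i]->va = calloc(1, buf_size);
            if (!cq->rx_buff[i]->va) {
                free(cq->rx_buff[i]);
                goto err_bufs;
            }
        }
        return 0;

    err_bufs:
        while (i--) {                  /* unwind only fully allocated entries */
            free(cq->rx_buff[i]->va);
            free(cq->rx_buff[i]);
        }
        free(cq->rx_buff);
    err_ring:
        free(cq->desc_ring);
        return -1;
    }

    int main(void)
    {
        struct ctlq cq;

        if (ctlq_alloc(&cq, 64, 4096))
            return 1;
        printf("allocated %zu descriptors\n", cq.ring_size);
        return 0;
    }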
|
/linux/drivers/net/ethernet/intel/ice/

ice_controlq.c (matched lines from ice_adminq_init_regs(), ice_mailbox_init_regs(), ice_sb_init_regs() and ice_check_sq_alive()):

    struct ice_ctl_q_info *cq = &hw->adminq;
    ICE_CQ_INIT_REGS(cq, PF_FW);
    ...
    struct ice_ctl_q_info *cq = &hw->mailboxq;
    ICE_CQ_INIT_REGS(cq, PF_MBX);
    ...
    struct ice_ctl_q_info *cq = &hw->sbq;
    ICE_CQ_INIT_REGS(cq, PF_SB);
    ...
    bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
        if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
            return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
                    cq->sq.len_ena_mask)) ==
    ...
|
/linux/drivers/infiniband/hw/mthca/

mthca_cq.c (matched lines from get_cqe(), next_cqe_sw(), update_cons_index() and mthca_cq_completion()):

    static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
        return get_cqe_from_buf(&cq->buf, entry);
    ...
    static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
        return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
    ...
    static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
        *cq->set_ci_db = cpu_to_be32(cq->cons_index);
        mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
    ...
    struct mthca_cq *cq;
    cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
    if (!cq) {
    ...
|
/linux/drivers/infiniband/sw/rxe/

rxe_cq.c (matched lines from rxe_cq_chk_attr() and rxe_cq_from_init()):

    int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
        if (cq) {
            count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
            rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
    ...
    int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
        cq->queue = rxe_queue_init(rxe, &cqe,
        if (!cq->queue) {
        ... cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
        cq->is_user = uresp;
        spin_lock_init(&cq->cq_lock);
    ...
|
/linux/drivers/infiniband/sw/rdmavt/

cq.c (matched lines from rvt_cq_enter()):

    bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
        spin_lock_irqsave(&cq->lock, flags);
        if (cq->ip) {
            u_wc = cq->queue;
        ...
            k_wc = cq->kqueue;
        ...
        if (head >= (unsigned)cq->ibcq.cqe) {
            head = cq->ibcq.cqe;
        ...
        if (unlikely(next == tail || cq->cq_full)) {
            struct rvt_dev_info *rdi = cq->rdi;
            if (!cq->cq_full)
    ...
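
rvt_cq_enter() is the producer side of a software CQ: under the lock it writes the work completion at head, computes the next head with wraparound, and records an overflow instead of overwriting unread entries when the ring is full. A minimal userspace model of just that index arithmetic follows (no locking, no user/kernel queue split); the names soft_cq and cq_enter are hypothetical, and the ring holds CQE + 1 slots so that head == tail means empty.

    #include <stdio.h>
    #include <stdbool.h>

    #define CQE 4                    /* advertised capacity; ring has CQE + 1 slots */

    struct wc { int wr_id; };        /* stand-in for struct ib_wc */

    struct soft_cq {
        struct wc queue[CQE + 1];
        unsigned int head;           /* next slot the producer writes   */
        unsigned int tail;           /* next slot the consumer reads    */
        bool cq_full;                /* set instead of overwriting data */
    };

    /* Returns true if the completion was queued, false on overflow. */
    static bool cq_enter(struct soft_cq *cq, const struct wc *entry)
    {
        unsigned int head = cq->head;
        unsigned int next;

        /* Wrap the way rvt_cq_enter() does: head may legitimately equal CQE. */
        if (head >= CQE)
            next = 0;
        else
            next = head + 1;

        if (next == cq->tail || cq->cq_full) {
            cq->cq_full = true;      /* the real driver also raises an error event */
            return false;
        }

        cq->queue[head] = *entry;
        cq->head = next;
        return true;
    }

    int main(void)
    {
        struct soft_cq cq = { .head = 0, .tail = 0, .cq_full = false };

        for (int i = 0; i < CQE + 2; i++) {
            struct wc w = { .wr_id = i };
            printf("enter %d -> %s\n", i, cq_enter(&cq, &w) ? "ok" : "overflow");
        }
        return 0;
    }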
|
/linux/drivers/net/ethernet/cisco/enic/

vnic_cq.c (matched lines from vnic_cq_free(), vnic_cq_alloc() and vnic_cq_init()):

    void vnic_cq_free(struct vnic_cq *cq)
        vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
        cq->ctrl = NULL;
    ...
    int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
        cq->index = index;
        cq->vdev = vdev;
        cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
        if (!cq->ctrl) {
        ...
        return vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
    ...
    void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
    ...
|
vnic_cq.h (matched lines from vnic_cq_to_clean(), vnic_cq_inc_to_clean() and the function prototypes):

    static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
        return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
    ...
    static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
        cq->to_clean++;
        if (cq->to_clean == cq->ring.desc_count) {
            cq->to_clean = 0;
            cq->last_color = cq->last_color ? 0 : 1;
    ...
    void vnic_cq_free(struct vnic_cq *cq);
    int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
    void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
    ...
|
/linux/drivers/scsi/snic/

vnic_cq.c (matched lines from svnic_cq_free(), svnic_cq_alloc() and svnic_cq_init()):

    void svnic_cq_free(struct vnic_cq *cq)
        svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
        cq->ctrl = NULL;
    ...
    int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
        cq->index = index;
        cq->vdev = vdev;
        cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
        if (!cq->ctrl) {
        ...
        return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
    ...
    void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
    ...
|
vnic_cq_fw.h (matched lines from vnic_cq_fw_service()):

    vnic_cq_fw_service(struct vnic_cq *cq,
        ...
        desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
            cq->ring.desc_size * cq->to_clean);
        ...
        while (color != cq->last_color) {
            if ((*q_service)(cq->vdev, cq->index, desc))
            ...
            cq->to_clean++;
            if (cq->to_clean == cq->ring.desc_count) {
                cq->to_clean = 0;
                cq->last_color = cq->last_color ? 0 : 1;
            ...
            desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
    ...
|
vnic_cq.h (matched lines from svnic_cq_service()):

    static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
        ...
        cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
            cq->ring.desc_size * cq->to_clean);
        ...
        while (color != cq->last_color) {
            if ((*q_service)(cq->vdev, cq_desc, type,
            ...
            cq->to_clean++;
            if (cq->to_clean == cq->ring.desc_count) {
                cq->to_clean = 0;
                cq->last_color = cq->last_color ? 0 : 1;
            ...
            cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
    ...
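
The svnic/vnic service routines here, and the fnic copies further down, all share one ownership protocol: the hardware stamps each completion descriptor with a color bit and flips that color every time it wraps the ring, while software consumes descriptors as long as the descriptor color differs from cq->last_color and toggles last_color on its own wrap, so entries left over from the previous lap are never reprocessed. The self-contained sketch below models that loop in userspace; fake_hw_post() plays the NIC's role and every name in it is hypothetical.

    #include <stdio.h>

    #define RING_SIZE 4

    /* Stand-in for struct cq_desc: payload plus the hardware-owned color bit. */
    struct cq_desc {
        unsigned int  completed_id;
        unsigned char color;
    };

    struct vnic_cq_model {
        struct cq_desc ring[RING_SIZE];
        unsigned int to_clean;       /* next descriptor software will look at   */
        unsigned char last_color;    /* color software treats as "already seen" */
        /* producer-side state (normally the NIC's, faked here) */
        unsigned int hw_index;
        unsigned char hw_color;
    };

    /* Fake "hardware": write one completion and toggle the color on wrap. */
    static void fake_hw_post(struct vnic_cq_model *cq, unsigned int id)
    {
        cq->ring[cq->hw_index].completed_id = id;
        cq->ring[cq->hw_index].color = cq->hw_color;
        if (++cq->hw_index == RING_SIZE) {
            cq->hw_index = 0;
            cq->hw_color ^= 1;
        }
    }

    /* Same shape as vnic_cq_service(): consume while the color looks "new". */
    static unsigned int cq_service(struct vnic_cq_model *cq, unsigned int budget)
    {
        unsigned int done = 0;
        struct cq_desc *desc = &cq->ring[cq->to_clean];

        while (desc->color != cq->last_color) {
            printf("completion id=%u\n", desc->completed_id);

            if (++cq->to_clean == RING_SIZE) {
                cq->to_clean = 0;
                cq->last_color ^= 1;     /* expect the new color next lap */
            }
            desc = &cq->ring[cq->to_clean];

            if (++done == budget)
                break;
        }
        return done;
    }

    int main(void)
    {
        struct vnic_cq_model cq = { .last_color = 0, .hw_color = 1 };

        for (unsigned int id = 0; id < 3; id++)
            fake_hw_post(&cq, id);
        printf("first pass: %u serviced\n", cq_service(&cq, 16));

        for (unsigned int id = 3; id < 6; id++)
            fake_hw_post(&cq, id);       /* wraps; hardware flips its color */
        printf("second pass: %u serviced\n", cq_service(&cq, 16));
        return 0;
    }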
|
/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_cq.c (matched lines from pvrdma_req_notify_cq() and pvrdma_create_cq()):

    struct pvrdma_cq *cq = to_vcq(ibcq);
    u32 val = cq->cq_handle;
    ...
    spin_lock_irqsave(&cq->cq_lock, flags);
    ...
    has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                        cq->ibcq.cqe, &head);
    ...
    spin_unlock_irqrestore(&cq->cq_lock, flags);
    ...
    struct pvrdma_cq *cq = to_vcq(ibcq);
    ...
    cq->ibcq.cqe = entries;
    cq->is_kernel = !udata;
    if (!cq->is_kernel) {
    ...
|
/linux/drivers/scsi/fnic/

vnic_cq.c (matched lines from vnic_cq_free(), vnic_cq_alloc() and vnic_cq_init()):

    void vnic_cq_free(struct vnic_cq *cq)
        vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
        cq->ctrl = NULL;
    ...
    int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
        cq->index = index;
        cq->vdev = vdev;
        cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
        if (!cq->ctrl) {
        ...
        err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
    ...
    void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
    ...
|
vnic_cq_copy.h (matched lines from vnic_cq_copy_service()):

    ... struct vnic_cq *cq,
        ...
        desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
            cq->ring.desc_size * cq->to_clean);
        ...
        while (color != cq->last_color) {
            if ((*q_service)(cq->vdev, cq->index, desc))
            ...
            cq->to_clean++;
            if (cq->to_clean == cq->ring.desc_count) {
                cq->to_clean = 0;
                cq->last_color = cq->last_color ? 0 : 1;
            ...
            desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
    ...
|
vnic_cq.h (matched lines from vnic_cq_service()):

    static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
        ...
        cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
            cq->ring.desc_size * cq->to_clean);
        ...
        while (color != cq->last_color) {
            if ((*q_service)(cq->vdev, cq_desc, type,
            ...
            cq->to_clean++;
            if (cq->to_clean == cq->ring.desc_count) {
                cq->to_clean = 0;
                cq->last_color = cq->last_color ? 0 : 1;
            ...
            cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
    ...
|
/linux/drivers/infiniband/hw/mlx5/

cq.c (matched lines from mlx5_ib_cq_comp(), mlx5_ib_cq_event(), get_cqe() and get_sw_cqe()):

    static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
    ...
    struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
    struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
    struct ib_cq *ibcq = &cq->ibcq;
    ...
    event.element.cq = ibcq;
    ...
    static void *get_cqe(struct mlx5_ib_cq *cq, int n)
        return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
    ...
    static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
        void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
    ...
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/

cq.c (matched lines from mlx5_add_cq_to_tasklet(), mlx5_core_cq_dummy_cb() and mlx5_create_cq()):

    void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
        struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
        ...
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
            mlx5_cq_hold(cq);
        ...
        list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
    ...
    static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
        mlx5_core_err(cq->eq->core.dev,
                      "CQ default completion callback, CQ #%u\n", cq->cqn);
    ...
    int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
        ...
        cq->cqn = MLX5_GET(create_cq_out, out, cqn);
    ...
|
/linux/drivers/infiniband/hw/mana/

cq.c (matched lines from mana_ib_create_cq()):

    struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
    ...
    cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
    cq->cq_handle = INVALID_MANA_HANDLE;
    ...
    cq->cqe = attr->cqe;
    err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
                               &cq->queue);
    ...
    cq->cqe = buf_size / COMP_ENTRY_SIZE;
    err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
    ...
    err = mana_ib_gd_create_cq(mdev, cq, doorbell);
    ...
    err = mana_ib_install_cq_cb(mdev, cq);
    ...
|
/linux/drivers/infiniband/hw/cxgb4/

cq.c (matched lines from destroy_cq() and create_cq()):

    static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        ...
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        ...
        kfree(cq->sw_queue);
        ... cq->memsize, cq->queue,
            dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
    ...
    static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        ...
        cq->cqid = c4iw_get_cqid(rdev, uctx);
    ...
|
t4.h (matched lines from write_gts(), t4_clear_cq_armed() and t4_arm_cq()):

    static inline void write_gts(struct t4_cq *cq, u32 val)
        if (cq->bar2_va)
            writel(val | INGRESSQID_V(cq->bar2_qid),
                   cq->bar2_va + SGE_UDB_GTS);
        else
            writel(val | INGRESSQID_V(cq->cqid), cq->gts);
    ...
    static inline int t4_clear_cq_armed(struct t4_cq *cq)
        return test_and_clear_bit(CQ_ARMED, &cq->flags);
    ...
    static inline int t4_arm_cq(struct t4_cq *cq, int se)
        ...
        set_bit(CQ_ARMED, &cq->flags);
        while (cq->cidx_inc > CIDXINC_M) {
    ...
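
t4_arm_cq() has to return the consumer-index credits accumulated in cidx_inc through the GTS doorbell, but the doorbell's CIDXINC field is only a few bits wide, so the driver drains the count in field-sized chunks and lets only the final write request the interrupt. The sketch below reproduces just that chunking; the field width, the ARM_F flag and the printf standing in for write_gts() are assumptions for illustration, not the real register layout.

    #include <stdio.h>

    #define CIDXINC_M  0x7fu           /* assumed field width, for illustration */
    #define ARM_F      (1u << 12)      /* hypothetical "arm interrupt" flag     */

    struct model_cq {
        unsigned int cqid;
        unsigned int cidx_inc;         /* completions consumed since last doorbell */
    };

    /* Stand-in for write_gts(): in the driver this is a writel() to BAR2/GTS. */
    static void write_gts(struct model_cq *cq, unsigned int val)
    {
        printf("GTS doorbell: cq %u, val 0x%08x\n", cq->cqid, val);
    }

    static void arm_cq(struct model_cq *cq)
    {
        /* Return credits that do not fit into a single CIDXINC field. */
        while (cq->cidx_inc > CIDXINC_M) {
            write_gts(cq, CIDXINC_M);              /* credits only, no arm */
            cq->cidx_inc -= CIDXINC_M;
        }
        /* Final write returns the remainder and arms the interrupt. */
        write_gts(cq, cq->cidx_inc | ARM_F);
        cq->cidx_inc = 0;
    }

    int main(void)
    {
        struct model_cq cq = { .cqid = 7, .cidx_inc = 300 };

        arm_cq(&cq);      /* emits two credit-only writes, then the arm write */
        return 0;
    }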
|
restrack.c (matched lines from fill_cq()):

    static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
        if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
        if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
        if (rdma_nl_put_driver_u32(msg, "size", cq->size))
        if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
        if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
        if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
        if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
        if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
        if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
    ...
|
/linux/drivers/scsi/elx/efct/

efct_hw_queues.c (matched lines from efct_hw_init_queues() and efct_hw_new_cq()):

    struct hw_cq *cq = NULL;
    ...
    cq = efct_hw_new_cq(eq,
    if (!cq) {
    ...
    mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
    ...
    cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
    if (!cq) {
    ...
    wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
    ...
    struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);
    if (!cq)
    ...
    cq->eq = eq;
    ...
|
/linux/include/linux/mlx5/

cq.h (matched lines from the comp() callback declarations, mlx5_cq_set_ci(), mlx5_cq_arm() and mlx5_cq_hold()):

    void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
    ...
    void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
    ...
    static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
        *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
    ...
    static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
        ...
        sn = cq->arm_sn & 3;
        ...
        *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
        ...
        doorbell[1] = cpu_to_be32(cq->cqn);
    ...
    static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
        refcount_inc(&cq->refcount);
    ...
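
mlx5_cq_set_ci() and mlx5_cq_arm() publish CQ state to the device as big-endian doorbell words: the consumer index is masked to 24 bits, and the arm word packs a 2-bit arm sequence number into bits 31:28 together with the command and the index. The snippet below only reproduces that bit packing in userspace, with htonl() standing in for cpu_to_be32(); treat the command constants as illustrative values rather than the authoritative encoding.

    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl() as a userspace stand-in for cpu_to_be32() */

    /* Assumed command encodings, shown for illustration only. */
    #define CQ_DB_REQ_NOT_SOL  (1u << 24)   /* notify on solicited completions */
    #define CQ_DB_REQ_NOT      (0u << 24)   /* notify on the next completion   */

    int main(void)
    {
        unsigned int cons_index = 0x1234567;   /* wider than 24 bits on purpose */
        unsigned int arm_sn = 2;
        unsigned int cqn = 42;

        /* mlx5_cq_set_ci(): consumer index masked to 24 bits, big-endian. */
        unsigned int set_ci = htonl(cons_index & 0xffffff);

        /* mlx5_cq_arm(): 2-bit sequence number in bits 31:28, then cmd | ci. */
        unsigned int sn = arm_sn & 3;
        unsigned int ci = cons_index & 0xffffff;
        unsigned int arm_db = htonl(sn << 28 | CQ_DB_REQ_NOT_SOL | ci);

        printf("set_ci doorbell word: 0x%08x\n", set_ci);
        printf("arm doorbell word:    0x%08x (for cq %u)\n", arm_db, cqn);
        return 0;
    }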
|
/linux/drivers/net/ethernet/mellanox/mlx4/

cq.c (matched lines from mlx4_add_cq_to_tasklet() and mlx4_cq_completion()):

    static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
        struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
        ...
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
            refcount_inc(&cq->refcount);
            list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
    ...
    struct mlx4_cq *cq;
    ...
    cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
    ...
    if (!cq) {
    ...
    ++cq->arm_sn;
    ...
    cq->comp(cq);
    ...
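
mlx4_cq_completion() resolves the CQ number delivered with the event to the driver's CQ object (a radix-tree lookup in the real code), bumps arm_sn so the next arm doorbell carries a fresh sequence number, and then calls the per-CQ completion callback. The stripped-down model below keeps that dispatch shape but uses a plain array in place of the radix tree; all names in it are hypothetical.

    #include <stdio.h>

    #define MAX_CQS 8   /* stand-in for the device's CQ table */

    struct model_cq {
        unsigned int cqn;
        unsigned int arm_sn;
        void (*comp)(struct model_cq *cq);   /* per-CQ completion callback */
    };

    struct model_dev {
        struct model_cq *cq_table[MAX_CQS];  /* a radix tree in the real driver */
    };

    static void napi_style_comp(struct model_cq *cq)
    {
        printf("cq %u: completion handled (arm_sn now %u)\n", cq->cqn, cq->arm_sn);
    }

    /* Shape of mlx4_cq_completion(): look up the CQ, bump arm_sn, call back. */
    static void cq_completion_event(struct model_dev *dev, unsigned int cqn)
    {
        struct model_cq *cq = dev->cq_table[cqn % MAX_CQS];

        if (!cq || cq->cqn != cqn) {
            fprintf(stderr, "completion event for unknown cq %u\n", cqn);
            return;
        }

        ++cq->arm_sn;     /* so the next arm doorbell carries a new sequence */
        cq->comp(cq);
    }

    int main(void)
    {
        static struct model_dev dev;                /* zero-initialized table  */
        struct model_cq cq = { .cqn = 5, .arm_sn = 0, .comp = napi_style_comp };

        dev.cq_table[cq.cqn % MAX_CQS] = &cq;
        cq_completion_event(&dev, 5);               /* known CQ: callback runs  */
        cq_completion_event(&dev, 6);               /* unknown CQ: warning path */
        return 0;
    }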
|
/linux/include/trace/events/

rdma_core.h (matched lines from two cq tracepoint definitions):

    struct ib_cq *cq
    ...
    TP_ARGS(cq),
    ...
    cq->timestamp = ktime_get();
    cq->interrupt = true;
    ...
    __entry->cq_id = cq->res.id;
    ...
    struct ib_cq *cq
    ...
    TP_ARGS(cq),
    ...
    cq->timestamp = ktime_get();
    cq->interrupt = false;
    ...
    __entry->cq_id = cq->res.id;
    ...
|