Searched refs:cq (Results 1 – 25 of 185) sorted by relevance

/freebsd/sys/dev/mlx4/mlx4_en/
mlx4_en_cq.c
42 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) in mlx4_en_cq_event() argument
58 struct mlx4_en_cq *cq; in mlx4_en_create_cq() local
61 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); in mlx4_en_create_cq()
62 if (!cq) { in mlx4_en_create_cq()
63 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx4_en_create_cq()
64 if (!cq) { in mlx4_en_create_cq()
70 cq->size = entries; in mlx4_en_create_cq()
71 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq()
73 cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT, in mlx4_en_create_cq()
74 taskqueue_thread_enqueue, &cq->tq); in mlx4_en_create_cq()
[all …]
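
The mlx4_en_create_cq() snippet above shows a common kernel allocation pattern: try a node-local (NUMA-aware) zeroed allocation first, then fall back to an unconstrained one before giving up. A minimal userland sketch of the same shape, with calloc standing in for kzalloc_node/kzalloc (all names here are illustrative):

    #include <stdlib.h>

    struct en_cq { int size; };

    /* Stand-in for kzalloc_node(): node-local allocation that may fail. */
    static struct en_cq *alloc_on_node(int node)
    {
            (void)node;             /* no real NUMA policy in this sketch */
            return calloc(1, sizeof(struct en_cq));
    }

    static struct en_cq *create_cq(int node)
    {
            struct en_cq *cq = alloc_on_node(node);

            if (!cq)
                    cq = calloc(1, sizeof(*cq));    /* fall back to any node */
            return cq;                              /* NULL only if both failed */
    }
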
en.h
676 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) in mlx4_en_cq_init_lock() argument
678 spin_lock_init(&cq->poll_lock); in mlx4_en_cq_init_lock()
679 cq->state = MLX4_EN_CQ_STATE_IDLE; in mlx4_en_cq_init_lock()
683 static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) in mlx4_en_cq_lock_napi() argument
686 spin_lock(&cq->poll_lock); in mlx4_en_cq_lock_napi()
687 if (cq->state & MLX4_CQ_LOCKED) { in mlx4_en_cq_lock_napi()
688 WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); in mlx4_en_cq_lock_napi()
689 cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; in mlx4_en_cq_lock_napi()
693 cq->state = MLX4_EN_CQ_STATE_NAPI; in mlx4_en_cq_lock_napi()
694 spin_unlock(&cq->poll_lock); in mlx4_en_cq_lock_napi()
[all …]
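
The en.h hits above are the busy-poll vs. NAPI arbitration for a CQ: one flag word, guarded by poll_lock, records who owns the CQ, and the loser of the race records that it yielded. A simplified sketch of that state machine (flag values and names are stand-ins; the real spinlock calls are shown as comments):

    #include <stdbool.h>

    #define CQ_STATE_IDLE        0u
    #define CQ_STATE_NAPI        1u    /* NAPI context owns the CQ */
    #define CQ_STATE_POLL        2u    /* busy-poll context owns the CQ */
    #define CQ_STATE_NAPI_YIELD  4u    /* NAPI lost the race, must retry */
    #define CQ_LOCKED            (CQ_STATE_NAPI | CQ_STATE_POLL)

    struct cq_state { unsigned flags; /* guarded by a per-CQ spinlock */ };

    static bool cq_lock_napi(struct cq_state *c)
    {
            bool got_it = true;

            /* spin_lock(&cq->poll_lock); */
            if (c->flags & CQ_LOCKED) {
                    /* busy-poll holds the CQ: note that NAPI yielded */
                    c->flags |= CQ_STATE_NAPI_YIELD;
                    got_it = false;
            } else {
                    c->flags = CQ_STATE_NAPI;
            }
            /* spin_unlock(&cq->poll_lock); */
            return got_it;
    }
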
/freebsd/sys/dev/ice/
ice_controlq.c
64 struct ice_ctl_q_info *cq = &hw->adminq; in ice_adminq_init_regs() local
68 ICE_CQ_INIT_REGS(cq, PF_FW); in ice_adminq_init_regs()
79 struct ice_ctl_q_info *cq = &hw->mailboxq; in ice_mailbox_init_regs() local
81 ICE_CQ_INIT_REGS(cq, PF_MBX); in ice_mailbox_init_regs()
92 struct ice_ctl_q_info *cq = &hw->sbq; in ice_sb_init_regs() local
96 ICE_CQ_INIT_REGS(cq, PF_SB); in ice_sb_init_regs()
102 * @cq: pointer to the specific Control queue
106 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
109 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
91 ice_check_sq_alive(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_check_sq_alive() argument
108 ice_alloc_ctrlq_sq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_alloc_ctrlq_sq_ring() argument
125 ice_alloc_ctrlq_rq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_alloc_ctrlq_rq_ring() argument
154 ice_alloc_rq_bufs(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_alloc_rq_bufs() argument
218 ice_alloc_sq_bufs(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_alloc_sq_bufs() argument
279 ice_cfg_sq_regs(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_cfg_sq_regs() argument
292 ice_cfg_rq_regs(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_cfg_rq_regs() argument
335 ice_init_sq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_init_sq() argument
397 ice_init_rq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_init_rq() argument
453 ice_shutdown_sq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_shutdown_sq() argument
524 ice_shutdown_rq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_shutdown_rq() argument
561 ice_idle_aq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_idle_aq() argument
575 struct ice_ctl_q_info *cq = &hw->adminq; ice_init_check_adminq() local
613 struct ice_ctl_q_info *cq; ice_init_ctrlq() local
671 struct ice_ctl_q_info *cq; ice_shutdown_ctrlq() local
757 ice_init_ctrlq_locks(struct ice_ctl_q_info * cq) ice_init_ctrlq_locks() argument
793 ice_destroy_ctrlq_locks(struct ice_ctl_q_info * cq) ice_destroy_ctrlq_locks() argument
824 ice_clean_sq(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_clean_sq() argument
878 ice_debug_cq(struct ice_hw * hw,struct ice_ctl_q_info * cq,void * desc,void * buf,u16 buf_len,bool response) ice_debug_cq() argument
926 ice_sq_done(struct ice_hw * hw,struct ice_ctl_q_info * cq) ice_sq_done() argument
948 ice_sq_send_cmd_nolock(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd) ice_sq_send_cmd_nolock() argument
1119 ice_sq_send_cmd(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd) ice_sq_send_cmd() argument
1163 ice_clean_rq_elem(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_rq_event_info * e,u16 * pending) ice_clean_rq_elem() argument
[all …]
/freebsd/sys/dev/mthca/
mthca_cq.c
169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) in get_cqe() argument
171 return get_cqe_from_buf(&cq->buf, entry); in get_cqe()
179 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) in next_cqe_sw() argument
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
204 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, in update_cons_index() argument
208 *cq->set_ci_db = cpu_to_be32(cq->cons_index); in update_cons_index()
211 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, in update_cons_index()
224 struct mthca_cq *cq; in mthca_cq_completion() local
226 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
228 if (!cq) { in mthca_cq_completion()
[all …]
/freebsd/contrib/ofed/libmlx4/
cq.c
97 static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry) in get_cqe() argument
99 return cq->buf.buf + entry * cq->cqe_size; in get_cqe()
102 static void *get_sw_cqe(struct mlx4_cq *cq, int n) in get_sw_cqe() argument
104 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe()
105 struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe; in get_sw_cqe()
108 !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
111 static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq) in next_cqe_sw() argument
113 return get_sw_cqe(cq, cq->cons_index); in next_cqe_sw()
200 static inline int mlx4_get_next_cqe(struct mlx4_cq *cq,
203 static inline int mlx4_get_next_cqe(struct mlx4_cq *cq, in mlx4_get_next_cqe() argument
[all …]
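
get_sw_cqe() above is the classic ownership-bit test: a CQE belongs to software when its owner bit matches the wrap parity of the consumer index, so stale entries from the previous pass around the ring are never handed up. A self-contained sketch of that check (struct layout and helper names are illustrative; the mask value follows the snippet):

    #include <stdint.h>

    #define CQE_OWNER_MASK 0x80    /* MLX4_CQE_OWNER_MASK in the snippet */

    struct cqe { uint8_t owner_sr_opcode; /* ... rest of the CQE ... */ };

    /* cqe_mask is ring_size - 1 (ring sizes are powers of two). */
    static struct cqe *get_sw_cqe(struct cqe *ring, uint32_t cqe_mask,
                                  uint32_t cons_index)
    {
            struct cqe *cqe = &ring[cons_index & cqe_mask];
            int owner = !!(cqe->owner_sr_opcode & CQE_OWNER_MASK);
            int parity = !!(cons_index & (cqe_mask + 1));

            /* owner != parity: hardware still owns this entry */
            return (owner ^ parity) ? NULL : cqe;
    }
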
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_cq.c
44 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) in mlx4_ib_cq_comp() argument
46 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp()
50 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) in mlx4_ib_cq_event() argument
57 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event()
61 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
65 event.element.cq = ibcq; in mlx4_ib_cq_event()
75 static void *get_cqe(struct mlx4_ib_cq *cq, int n) in get_cqe() argument
77 return get_cqe_from_buf(&cq->buf, n); in get_cqe()
80 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) in get_sw_cqe() argument
82 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
[all …]
/freebsd/contrib/ofed/libmlx5/
cq.c
76 static void *get_cqe(struct mlx5_cq *cq, int n) in get_cqe() argument
78 return cq->active_buf->buf + n * cq->cqe_sz; in get_cqe()
81 static void *get_sw_cqe(struct mlx5_cq *cq, int n) in get_sw_cqe() argument
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe()
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
89 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) { in get_sw_cqe()
96 static void *next_cqe_sw(struct mlx5_cq *cq) in next_cqe_sw() argument
98 return get_sw_cqe(cq, cq->cons_index); in next_cqe_sw()
101 static void update_cons_index(struct mlx5_cq *cq) in update_cons_index() argument
103 cq->dbrec[MLX5_CQ_SET_CI] = htobe32(cq->cons_index & 0xffffff); in update_cons_index()
[all …]
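
update_cons_index() above publishes the 24-bit consumer index through a doorbell record so the HCA knows which CQEs it may reuse; the value is stored big-endian. A minimal sketch (the header is FreeBSD's, matching the tree being searched; the parameter name is illustrative):

    #include <sys/endian.h>    /* htobe32; <endian.h> on Linux */
    #include <stdint.h>

    static void publish_cons_index(volatile uint32_t *dbrec_set_ci,
                                   uint32_t cons_index)
    {
            /* only the low 24 bits of the index are significant */
            *dbrec_set_ci = htobe32(cons_index & 0xffffff);
    }
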
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_cq.c
36 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe __unused) in mlx5_ib_cq_comp() argument
38 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
45 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
46 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
47 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
59 event.element.cq = ibcq; in mlx5_ib_cq_event()
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
71 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); in get_cqe()
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
94 next_cqe_sw(struct mlx5_ib_cq * cq) next_cqe_sw() argument
494 mlx5_ib_poll_sw_comp(struct mlx5_ib_cq * cq,int num_entries,struct ib_wc * wc,int * npolled) mlx5_ib_poll_sw_comp() argument
514 mlx5_poll_one(struct mlx5_ib_cq * cq,struct mlx5_ib_qp ** cur_qp,struct ib_wc * wc) mlx5_poll_one() argument
646 poll_soft_wc(struct mlx5_ib_cq * cq,int num_entries,struct ib_wc * wc) poll_soft_wc() argument
670 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_poll_cq() local
703 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_arm_cq() local
746 create_cq_user(struct mlx5_ib_dev * dev,struct ib_udata * udata,struct mlx5_ib_cq * cq,int entries,u32 ** cqb,int * cqe_size,int * index,int * inlen) create_cq_user() argument
831 destroy_cq_user(struct mlx5_ib_cq * cq,struct ib_udata * udata) destroy_cq_user() argument
840 init_cq_buf(struct mlx5_ib_cq * cq,struct mlx5_ib_cq_buf * buf) init_cq_buf() argument
853 create_cq_kernel(struct mlx5_ib_dev * dev,struct mlx5_ib_cq * cq,int entries,int cqe_size,u32 ** cqb,int * index,int * inlen) create_cq_kernel() argument
902 destroy_cq_kernel(struct mlx5_ib_dev * dev,struct mlx5_ib_cq * cq) destroy_cq_kernel() argument
910 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, notify_soft_wc_handler() local
924 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_create_cq() local
1017 mlx5_ib_destroy_cq(struct ib_cq * cq,struct ib_udata * udata) mlx5_ib_destroy_cq() argument
1034 __mlx5_ib_cq_clean(struct mlx5_ib_cq * cq,u32 rsn,struct mlx5_ib_srq * srq) __mlx5_ib_cq_clean() argument
1085 mlx5_ib_cq_clean(struct mlx5_ib_cq * cq,u32 qpn,struct mlx5_ib_srq * srq) mlx5_ib_cq_clean() argument
1095 mlx5_ib_modify_cq(struct ib_cq * cq,u16 cq_count,u16 cq_period) mlx5_ib_modify_cq() argument
1112 resize_user(struct mlx5_ib_dev * dev,struct mlx5_ib_cq * cq,int entries,struct ib_udata * udata,int * npas,int * page_shift,int * cqe_size) resize_user() argument
1150 un_resize_user(struct mlx5_ib_cq * cq) un_resize_user() argument
1155 resize_kernel(struct mlx5_ib_dev * dev,struct mlx5_ib_cq * cq,int entries,int cqe_size) resize_kernel() argument
1177 un_resize_kernel(struct mlx5_ib_dev * dev,struct mlx5_ib_cq * cq) un_resize_kernel() argument
1183 copy_resize_cqes(struct mlx5_ib_cq * cq) copy_resize_cqes() argument
1242 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_resize_cq() local
1370 struct mlx5_ib_cq *cq; mlx5_ib_get_cqe_size() local
1383 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_generate_wc() local
[all …]
/freebsd/sys/ofed/drivers/infiniband/core/
ib_cq.c
51 struct ib_cq *cq = container_of(work, struct ib_cq, work); in ib_cq_poll_work() local
57 n = ib_poll_cq(cq, IB_CQ_POLL_MAX, ib_wc); in ib_cq_poll_work()
62 wc->wr_cqe->done(cq, wc); in ib_cq_poll_work()
66 if (ib_req_notify_cq(cq, IB_CQ_POLL_FLAGS) > 0) in ib_cq_poll_work()
77 queue_work(ib_comp_wq, &cq->work); in ib_cq_poll_work()
81 ib_cq_completion_workqueue(struct ib_cq *cq, void *private) in ib_cq_completion_workqueue() argument
83 queue_work(ib_comp_wq, &cq->work); in ib_cq_completion_workqueue()
96 struct ib_cq *cq; in __ib_alloc_cq_user() local
112 cq = rdma_zalloc_drv_obj(dev, ib_cq); in __ib_alloc_cq_user()
113 if (!cq) in __ib_alloc_cq_user()
[all …]
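
ib_cq_poll_work() above is the workqueue-driven completion path: poll a bounded batch of work completions, dispatch each through its done callback, and re-queue the work item when re-arming the CQ reports that more completions raced in. A simplified sketch of that control flow (the batch size and the three callbacks are stand-ins for ib_poll_cq()/ib_req_notify_cq()/queue_work()):

    #define POLL_BATCH 16

    struct wc { void (*done)(struct wc *wc); };

    static void cq_poll_work(int (*poll)(struct wc *, int),
                             int (*arm)(void),
                             void (*requeue)(void))
    {
            struct wc wcs[POLL_BATCH];
            int i, n;

            do {
                    n = poll(wcs, POLL_BATCH);
                    for (i = 0; i < n; i++)
                            wcs[i].done(&wcs[i]);
            } while (n == POLL_BATCH);  /* full batch: there may be more */

            if (arm() > 0)
                    requeue();          /* completions arrived while arming */
    }
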
ib_uverbs_std_types_cq.c
41 struct ib_cq *cq = uobject->object; in uverbs_free_cq() local
42 struct ib_uverbs_event_queue *ev_queue = cq->cq_context; in uverbs_free_cq()
47 ret = ib_destroy_cq_user(cq, &attrs->driver_udata); in uverbs_free_cq()
70 struct ib_cq *cq; in UVERBS_HANDLER() local
111 cq = rdma_zalloc_drv_obj(ib_dev, ib_cq); in UVERBS_HANDLER()
112 if (!cq) { in UVERBS_HANDLER()
117 cq->device = ib_dev; in UVERBS_HANDLER()
118 cq->uobject = obj; in UVERBS_HANDLER()
119 cq->comp_handler = ib_uverbs_comp_handler; in UVERBS_HANDLER()
120 cq->event_handler = ib_uverbs_cq_event_handler; in UVERBS_HANDLER()
[all …]
/freebsd/sys/dev/enic/
vnic_cq.c
10 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument
18 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_cq_init()
19 ENIC_BUS_WRITE_8(cq->ctrl, CQ_RING_BASE, paddr); in vnic_cq_init()
20 ENIC_BUS_WRITE_4(cq->ctrl, CQ_RING_SIZE, cq->ring.desc_count); in vnic_cq_init()
21 ENIC_BUS_WRITE_4(cq->ctrl, CQ_FLOW_CONTROL_ENABLE, flow_control_enable); in vnic_cq_init()
22 ENIC_BUS_WRITE_4(cq->ctrl, CQ_COLOR_ENABLE, color_enable); in vnic_cq_init()
23 ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, cq_head); in vnic_cq_init()
24 ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, cq_tail); in vnic_cq_init()
25 ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, cq_tail_color); in vnic_cq_init()
26 ENIC_BUS_WRITE_4(cq->ctrl, CQ_INTR_ENABLE, interrupt_enable); in vnic_cq_init()
[all …]
vnic_cq.h
78 void vnic_cq_free(struct vnic_cq *cq);
79 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
84 void vnic_cq_clean(struct vnic_cq *cq);
85 int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
88 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument
99 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
100 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
104 while (color != cq->last_color) { in vnic_cq_service()
105 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
109 cq->to_clean++; in vnic_cq_service()
[all …]
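
vnic_cq_service() above uses a color bit instead of an owner bit: the NIC flips the color it writes each time it wraps the ring, so a descriptor is new exactly while its color differs from the color software completed last. A simplified sketch (descriptor layout and the handler hook are stand-ins):

    #include <stdint.h>

    struct cq_desc { uint8_t type_color; };    /* color in the top bit */

    static unsigned cq_service(struct cq_desc *ring, unsigned desc_count,
                               unsigned *to_clean, uint8_t *last_color,
                               unsigned work_to_do)
    {
            unsigned done = 0;
            struct cq_desc *d = &ring[*to_clean];

            while ((uint8_t)(d->type_color >> 7) != *last_color &&
                   done < work_to_do) {
                    /* ... hand this completion to the queue handler ... */
                    if (++(*to_clean) == desc_count) {
                            *to_clean = 0;
                            *last_color = !*last_color;  /* ring wrapped */
                    }
                    d = &ring[*to_clean];
                    done++;
            }
            return done;
    }
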
/freebsd/sys/dev/mlx5/mlx5_core/
mlx5_cq.c
62 struct mlx5_core_cq *cq; in mlx5_cq_completion() local
76 cq = table->linear_array[cqn].cq; in mlx5_cq_completion()
78 cq = radix_tree_lookup(&table->tree, cqn); in mlx5_cq_completion()
83 if (likely(cq != NULL)) { in mlx5_cq_completion()
84 ++cq->arm_sn; in mlx5_cq_completion()
85 cq->comp(cq, eqe); in mlx5_cq_completion()
97 struct mlx5_core_cq *cq; in mlx5_cq_event() local
108 cq = table->linear_array[cqn].cq; in mlx5_cq_event()
110 cq = radix_tree_lookup(&table->tree, cqn); in mlx5_cq_event()
115 if (likely(cq != NULL)) { in mlx5_cq_event()
[all …]
/freebsd/sys/dev/cxgbe/iw_cxgbe/
cq.c
52 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in destroy_cq() argument
76 res->u.cq.restype = FW_RI_RES_TYPE_CQ; in destroy_cq()
77 res->u.cq.op = FW_RI_RES_OP_RESET; in destroy_cq()
78 res->u.cq.iqid = cpu_to_be32(cq->cqid); in destroy_cq()
86 kfree(cq->sw_queue); in destroy_cq()
88 cq->memsize, cq->queue, in destroy_cq()
89 dma_unmap_addr(cq, mapping)); in destroy_cq()
90 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq()
95 create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in create_cq() argument
111 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq()
[all …]
t4.h
569 static inline void write_gts(struct t4_cq *cq, u32 val) in write_gts() argument
571 writel(val | V_INGRESSQID(cq->bar2_qid), in write_gts()
572 (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS)); in write_gts()
575 static inline int t4_clear_cq_armed(struct t4_cq *cq) in t4_clear_cq_armed() argument
577 return test_and_clear_bit(CQ_ARMED, &cq->flags); in t4_clear_cq_armed()
580 static inline int t4_arm_cq(struct t4_cq *cq, int se) in t4_arm_cq() argument
584 set_bit(CQ_ARMED, &cq->flags); in t4_arm_cq()
585 while (cq->cidx_inc > CIDXINC_MASK) { in t4_arm_cq()
587 writel(val | V_INGRESSQID(cq->bar2_qid), in t4_arm_cq()
588 (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS)); in t4_arm_cq()
[all …]
/freebsd/contrib/ofed/libcxgb4/
t4.h
594 static inline int t4_arm_cq(struct t4_cq *cq, int se) in t4_arm_cq() argument
598 while (cq->cidx_inc > CIDXINC_M) { in t4_arm_cq()
600 INGRESSQID_V(cq->cqid & cq->qid_mask); in t4_arm_cq()
601 writel(val, cq->ugts); in t4_arm_cq()
602 cq->cidx_inc -= CIDXINC_M; in t4_arm_cq()
604 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | in t4_arm_cq()
605 INGRESSQID_V(cq->cqid & cq->qid_mask); in t4_arm_cq()
606 writel(val, cq->ugts); in t4_arm_cq()
607 cq->cidx_inc = 0; in t4_arm_cq()
611 static inline void t4_swcq_produce(struct t4_cq *cq) in t4_swcq_produce() argument
[all …]
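
t4_arm_cq() above has to return consumer-index credits through the narrow CIDXINC field of the GTS doorbell, so a large backlog is flushed in mask-sized chunks and only the final write carries the SE/arm request. A sketch of that loop (the field widths, bit positions, and mmio_write32() are illustrative stand-ins, not the real SGE encoding):

    #include <stdint.h>

    #define CIDXINC_MASK 0x7ffu    /* illustrative field-width mask */

    static void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;    /* stand-in for writel() to the BAR2/UGTS page */
    }

    static void arm_cq(volatile uint32_t *gts, uint32_t qid,
                       uint32_t *cidx_inc, int se)
    {
            while (*cidx_inc > CIDXINC_MASK) {
                    /* partial credit return, no arm yet */
                    mmio_write32(gts, CIDXINC_MASK | qid);
                    *cidx_inc -= CIDXINC_MASK;
            }
            /* final write: remaining credits plus the (SE) arm request */
            mmio_write32(gts, *cidx_inc | ((uint32_t)(se != 0) << 31) | qid);
            *cidx_inc = 0;
    }
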
cq.c
42 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) in insert_recv_cqe() argument
47 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
54 cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen)); in insert_recv_cqe()
55 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
56 t4_swcq_produce(cq); in insert_recv_cqe()
59 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) in c4iw_flush_rq() argument
66 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq()
68 insert_recv_cqe(wq, cq); in c4iw_flush_rq()
74 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, in insert_sq_cqe() argument
80 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
[all …]
/freebsd/sys/dev/mlx4/mlx4_core/
mlx4_cq.c
56 struct mlx4_cq *cq; in mlx4_cq_completion() local
58 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion()
60 if (!cq) { in mlx4_cq_completion()
65 ++cq->arm_sn; in mlx4_cq_completion()
67 cq->comp(cq); in mlx4_cq_completion()
73 struct mlx4_cq *cq; in mlx4_cq_event() local
77 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_event()
78 if (cq) in mlx4_cq_event()
79 atomic_inc(&cq->refcount); in mlx4_cq_event()
83 if (!cq) { in mlx4_cq_event()
[all …]
/freebsd/sys/dev/oce/
oce_queue.c
78 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
314 struct oce_cq *cq; in oce_wq_create() local
318 cq = oce_cq_create(sc, in oce_wq_create()
322 if (!cq) in oce_wq_create()
325 wq->cq = cq; in oce_wq_create()
336 eq->cq[eq->cq_valid] = cq; in oce_wq_create()
338 cq->cb_arg = wq; in oce_wq_create()
339 cq->cq_handler = oce_wq_handler; in oce_wq_create()
370 if (wq->cq != NULL) { in oce_wq_del()
371 oce_cq_del(sc, wq->cq); in oce_wq_del()
[all …]
/freebsd/lib/libc/gen/
disklabel.c
65 char *cp, *cq; /* can't be register */ in getdiskbyname() local
77 cq = dp->d_typename; in getdiskbyname()
79 while (cq < dp->d_typename + sizeof(dp->d_typename) - 1 && in getdiskbyname()
80 (*cq = *cp) && *cq != '|' && *cq != ':') in getdiskbyname()
81 cq++, cp++; in getdiskbyname()
82 *cq = '\0'; in getdiskbyname()
84 if (cgetstr(buf, "ty", &cq) > 0) { in getdiskbyname()
85 if (strcmp(cq, "removable") == 0) in getdiskbyname()
87 else if (cq && strcmp(cq, "simulated") == 0) in getdiskbyname()
89 free(cq); in getdiskbyname()
[all …]
/freebsd/sys/dev/mlx5/
cq.h
125 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
127 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
135 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, in mlx5_cq_arm()
144 sn = cq->arm_sn & 3; in mlx5_cq_arm()
147 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); in mlx5_cq_arm()
155 doorbell[1] = cpu_to_be32(cq->cqn);
162 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
164 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
165 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
119 mlx5_cq_set_ci(struct mlx5_core_cq * cq) mlx5_cq_set_ci() argument
129 mlx5_cq_arm(struct mlx5_core_cq * cq,u32 cmd,void __iomem * uar_page,spinlock_t * doorbell_lock,u32 cons_index) mlx5_cq_arm() argument
[all …]
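
mlx5_cq_set_ci()/mlx5_cq_arm() above split arming into two stores: the arm doorbell record gets a 2-bit arm sequence number in the top nibble plus the command and the 24-bit consumer index, and the same word together with the CQN is then rung on the UAR page. A sketch of the value composition (names are illustrative; the required memory barrier is only marked as a comment):

    #include <sys/endian.h>    /* htobe32; cpu_to_be32 in the snippet */
    #include <stdint.h>

    static void arm_cq(volatile uint32_t *arm_db, volatile uint32_t *uar,
                       uint32_t arm_sn, uint32_t cmd, uint32_t cons_index,
                       uint32_t cqn)
    {
            uint32_t doorbell = (arm_sn & 3) << 28 | cmd |
                                (cons_index & 0xffffff);

            *arm_db = htobe32(doorbell);
            /* barrier: doorbell record must be visible before the UAR write */
            uar[0] = htobe32(doorbell);
            uar[1] = htobe32(cqn);
    }
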
/freebsd/sys/dev/mlx5/mlx5_en/
mlx5_en_txrx.c
34 mlx5e_get_cqe(struct mlx5e_cq *cq) in mlx5e_get_cqe() argument
38 cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq)); in mlx5e_get_cqe()
40 if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK) in mlx5e_get_cqe()
52 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); in mlx5e_cq_error_event() local
54 mlx5_en_err(cq->priv->ifp, "cqn=0x%.6x event=0x%.2x\n", in mlx5e_cq_error_event()
59 mlx5e_dump_err_cqe(struct mlx5e_cq *cq, u32 qn, const struct mlx5_err_cqe *err_cqe) in mlx5e_dump_err_cqe() argument
70 ci = (cq->wq.cc - 1) & cq->wq.sz_m1; in mlx5e_dump_err_cqe()
72 mlx5_en_err(cq->priv->ifp, in mlx5e_dump_err_cqe()
74 cq->mcq.cqn, ci, qn, err_cqe->op_own >> 4, in mlx5e_dump_err_cqe()
/freebsd/contrib/ofed/libibverbs/
device.c
135 int verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context, in verbs_init_cq() argument
141 cq->context = context; in verbs_init_cq()
142 cq->channel = channel; in verbs_init_cq()
144 err = pthread_mutex_init(&cq->mutex, NULL); in verbs_init_cq()
147 err = pthread_cond_init(&cq->cond, NULL); in verbs_init_cq()
151 if (cq->channel) { in verbs_init_cq()
153 ++cq->channel->refcnt; in verbs_init_cq()
157 cq->cq_context = cq_context; in verbs_init_cq()
158 cq->comp_events_completed = 0; in verbs_init_cq()
159 cq->async_events_completed = 0; in verbs_init_cq()
[all …]
verbs.h
374 struct ibv_cq *cq; member
674 struct ibv_cq *cq; member
698 struct ibv_cq *cq; member
1024 struct ibv_cq *cq; member
1112 static inline struct ibv_cq *ibv_cq_ex_to_cq(struct ibv_cq_ex *cq) in ibv_cq_ex_to_cq() argument
1114 return (struct ibv_cq *)cq; in ibv_cq_ex_to_cq()
1117 static inline int ibv_start_poll(struct ibv_cq_ex *cq, in ibv_start_poll() argument
1120 return cq->start_poll(cq, attr); in ibv_start_poll()
1123 static inline int ibv_next_poll(struct ibv_cq_ex *cq) in ibv_next_poll() argument
1125 return cq->next_poll(cq); in ibv_next_poll()
[all …]
/freebsd/sys/dev/bnxt/bnxt_re/
qplib_fp.c
49 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
151 struct bnxt_qplib_cq *cq = nq_work->cq; in bnxt_qpn_cqn_sched_task() local
154 if (cq && nq) { in bnxt_qpn_cqn_sched_task()
155 spin_lock_bh(&cq->compl_lock); in bnxt_qpn_cqn_sched_task()
159 __func__, cq, nq); in bnxt_qpn_cqn_sched_task()
160 nq->cqn_handler(nq, cq); in bnxt_qpn_cqn_sched_task()
162 spin_unlock_bh(&cq->compl_lock); in bnxt_qpn_cqn_sched_task()
253 static void clean_nq(struct bnxt_qplib_cq *cq) in clean_nq() argument
264 nq = cq->nq; in clean_nq()
288 if (q_handle == (u64)cq) { in clean_nq()
[all …]
