/linux/drivers/infiniband/core/
  cq.c
     42  struct ib_cq *cq = dim->priv;  in ib_cq_rdma_dim_work()
     53  static void rdma_dim_init(struct ib_cq *cq)  in rdma_dim_init()
     74  static void rdma_dim_destroy(struct ib_cq *cq)  in rdma_dim_destroy()
     83  static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)  in __poll_cq()
     92  static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,  in __ib_process_cq()
    138  int ib_process_cq_direct(struct ib_cq *cq, int budget)  in ib_process_cq_direct()
    146  static void ib_cq_completion_direct(struct ib_cq *cq, void *private)  in ib_cq_completion_direct()
    153  struct ib_cq *cq = container_of(iop, struct ib_cq, iop);  in ib_poll_handler()
    172  static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)  in ib_cq_completion_softirq()
    180  struct ib_cq *cq = container_of(work, struct ib_cq, work);  in ib_cq_poll_work()
    [all …]
  uverbs_std_types_cq.c
     42  struct ib_cq *cq = uobject->object;  in uverbs_free_cq()
     71  struct ib_cq *cq;  in UVERBS_HANDLER()
    115  cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);  in UVERBS_HANDLER()
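
The core/cq.c hits above are the shared CQ machinery that ULPs drive through ib_alloc_cq() and struct ib_cqe rather than by open-coded polling: __ib_process_cq() dispatches each completion through wc->wr_cqe->done, and the direct/softirq/workqueue variants only differ in where that runs. A minimal consumer-side sketch under those assumptions; struct my_ctx and the helper names are illustrative, not kernel API:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-request context; the core finds the callback via wc->wr_cqe. */
struct my_ctx {
	struct ib_cqe cqe;
};

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("request %p failed: %s\n", ctx,
		       ib_wc_status_msg(wc->status));
		return;
	}
	/* process the completed request here */
}

static void my_init_ctx(struct my_ctx *ctx)
{
	ctx->cqe.done = my_done;	/* later reached through wr->wr_cqe */
}

static struct ib_cq *my_alloc_cq(struct ib_device *dev, void *priv)
{
	/*
	 * IB_POLL_SOFTIRQ (or IB_POLL_WORKQUEUE) lets core/cq.c run the done
	 * callbacks from ib_poll_handler()/ib_cq_poll_work(); IB_POLL_DIRECT
	 * would require the caller to invoke ib_process_cq_direct() itself.
	 */
	return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_SOFTIRQ);
}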

/linux/drivers/infiniband/sw/rdmavt/
  cq.h
     12  int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
     14  int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
     15  int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
     16  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
     17  int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
  cq.c
    159  int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,  in rvt_create_cq()
    278  int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)  in rvt_destroy_cq()
    304  int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)  in rvt_req_notify_cq()
    340  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in rvt_resize_cq()
    478  int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)  in rvt_poll_cq()

/linux/drivers/infiniband/hw/ocrdma/
  ocrdma_verbs.h
     51  int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
     52  int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
     72  int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
     74  int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
     75  int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);

/linux/drivers/infiniband/hw/hns/
  hns_roce_restrack.c
     12  int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)  in hns_roce_fill_res_cq_entry() argument
     14  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry()
     43  int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)  in hns_roce_fill_res_cq_entry_raw() argument
     45  struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);  in hns_roce_fill_res_cq_entry_raw()
     46  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry_raw()

/linux/include/trace/events/
  rdma_core.h
     51  struct ib_cq *cq
     72  struct ib_cq *cq
     93  const struct ib_cq *cq
    120  const struct ib_cq *cq,
    146  const struct ib_cq *cq
    167  const struct ib_cq *cq,
    193  const struct ib_cq *cq,
    253  const struct ib_cq *cq

/linux/drivers/infiniband/ulp/iser/
  iscsi_iser.h
    370  struct ib_cq *cq;
    497  void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
    498  void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
    499  void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
    500  void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
    501  void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
    502  void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
  iser_initiator.c
    145  void (*done)(struct ib_cq *cq, struct ib_wc *wc))  in iser_create_send_desc()
    533  void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_login_rsp()
    634  void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_task_rsp()
    674  void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_cmd_comp()
    680  void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_ctrl_comp()
    696  void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_dataout_comp()
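
The iser completion routines above (iser_login_rsp, iser_task_rsp, ...) are exactly such ib_cqe done callbacks, selected per descriptor when the work request is posted. A hedged sketch of that posting side; my_tx_desc and the helpers are illustrative, and the QP and SGE setup are assumed to exist already:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Hypothetical send descriptor; iser similarly keeps an ib_cqe per tx descriptor. */
struct my_tx_desc {
	struct ib_cqe cqe;
	struct ib_sge sge;
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_tx_desc *desc =
		container_of(wc->wr_cqe, struct my_tx_desc, cqe);

	/* release resources tied to desc; errors arrive via wc->status */
	pr_debug("tx desc %p completed with status %d\n", desc, wc->status);
}

static int my_post_send(struct ib_qp *qp, struct my_tx_desc *desc)
{
	struct ib_send_wr wr = { };

	desc->cqe.done	= my_send_done;
	wr.wr_cqe	= &desc->cqe;	/* routes the completion to my_send_done */
	wr.sg_list	= &desc->sge;
	wr.num_sge	= 1;
	wr.opcode	= IB_WR_SEND;
	wr.send_flags	= IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, NULL);
}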

/linux/drivers/infiniband/hw/bnxt_re/
  ib_verbs.h
    102  struct ib_cq ib_cq;  member
    243  int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
    245  int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
    246  int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
    247  int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
    248  int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
  main.c
   1064  static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)  in bnxt_re_fill_res_cq_entry() argument
   1070  cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);  in bnxt_re_fill_res_cq_entry()
   1094  static int bnxt_re_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)  in bnxt_re_fill_res_cq_entry_raw() argument
   1101  cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);  in bnxt_re_fill_res_cq_entry_raw()
   1300  INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
   1575  if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {  in bnxt_re_handle_cq_async_error()
   1576  ibevent.element.cq = &cq->ib_cq;  in bnxt_re_handle_cq_async_error()
   1581  cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);  in bnxt_re_handle_cq_async_error()
   1663  if (cq->ib_cq.comp_handler)  in bnxt_re_cqn_handler()
   1664  (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);  in bnxt_re_cqn_handler()
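
The bnxt_re hits show both halves of a provider's CQ contract: struct ib_cq embedded in the driver CQ (sized for the core by INIT_RDMA_OBJ_SIZE) and the comp_handler/event_handler upcalls fired from the interrupt path. A hedged sketch of wiring such verbs into an ib_device_ops table; the mydrv names are illustrative stubs, and create_cq/resize_cq are left out because their prototypes have changed across kernel versions:

#include <linux/module.h>
#include <rdma/ib_verbs.h>

/* Illustrative driver CQ; the embedded ib_cq must be the first member. */
struct mydrv_cq {
	struct ib_cq ibcq;
	/* driver-private ring state would follow */
};

static int mydrv_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	return 0;	/* tear down the hardware CQ here */
}

static int mydrv_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	return 0;	/* stub: no completions reaped */
}

static int mydrv_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	return 0;	/* stub: arm the hardware CQ doorbell here */
}

static const struct ib_device_ops mydrv_dev_ops = {
	.owner		= THIS_MODULE,
	.destroy_cq	= mydrv_destroy_cq,
	.poll_cq	= mydrv_poll_cq,
	.req_notify_cq	= mydrv_req_notify_cq,

	/* lets ib_core allocate struct mydrv_cq and hand back the embedded ibcq */
	INIT_RDMA_OBJ_SIZE(ib_cq, mydrv_cq, ibcq),
};

The table would then be installed with ib_set_device_ops() before ib_register_device().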

/linux/net/smc/
  smc_wr.h
    131  void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
    136  void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
  smc_ib.h
     40  struct ib_cq *roce_cq_send;  /* send completion queue */
     41  struct ib_cq *roce_cq_recv;  /* recv completion queue */
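
smc registers raw completion handlers (smc_wr_tx_cq_handler / smc_wr_rx_cq_handler) with ib_create_cq() rather than using the ib_cqe dispatch shown earlier. A hedged sketch of that older registration style; the handler body, context pointer and CQ depth are illustrative:

#include <rdma/ib_verbs.h>

static void my_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	/* typically schedules a tasklet/work item that calls ib_poll_cq() */
}

static struct ib_cq *my_create_raw_cq(struct ib_device *dev, void *ctx)
{
	struct ib_cq_init_attr attr = {
		.cqe = 256,		/* illustrative depth */
		.comp_vector = 0,
	};

	/* NULL event handler: async CQ errors are simply not reported here */
	return ib_create_cq(dev, my_cq_handler, NULL, ctx, &attr);
}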

/linux/drivers/infiniband/hw/qedr/
  verbs.h
     54  int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
     56  int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
     57  int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
     89  int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);

/linux/drivers/infiniband/sw/siw/
  siw_verbs.h
     45  int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
     64  int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
     65  int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
     66  int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);

/linux/include/rdma/
  rdmavt_cq.h
     47  struct ib_cq ibcq;
     60  static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)  in ibcq_to_rvtcq()

/linux/drivers/infiniband/hw/usnic/
  usnic_ib_verbs.h
     58  int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
     60  int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);

/linux/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_verbs.h
    377  int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
    379  int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
    380  int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
    381  int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
  pvrdma_cq.c
     63  int pvrdma_req_notify_cq(struct ib_cq *ibcq,  in pvrdma_req_notify_cq()
    101  int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,  in pvrdma_create_cq()
    242  int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)  in pvrdma_destroy_cq()
    387  int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)  in pvrdma_poll_cq()
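
pvrdma_req_notify_cq(), like qedr_arm_cq() and rvt_req_notify_cq() above, implements the arm side of the consumer's poll-then-rearm discipline. A minimal sketch of that loop using only core verbs; the per-completion handling is elided:

#include <rdma/ib_verbs.h>

static void my_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		/* drain everything already queued */
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* handle wc here */
		}
		/*
		 * Re-arm for the next completion.  A positive return with
		 * IB_CQ_REPORT_MISSED_EVENTS means completions may have
		 * slipped in between the poll and the arm, so poll again
		 * instead of going back to sleep.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					IB_CQ_REPORT_MISSED_EVENTS) > 0);
}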

/linux/drivers/infiniband/hw/mlx5/
  wr.h
     94  int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);

/linux/drivers/nvme/host/
  rdma.c
     91  struct ib_cq *ib_cq;  member
    153  static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
    273  init_attr.send_cq = queue->ib_cq;  in nvme_rdma_create_qp()
    274  init_attr.recv_cq = queue->ib_cq;  in nvme_rdma_create_qp()
    416  ib_free_cq(queue->ib_cq);  in nvme_rdma_free_cq()
    418  ib_cq_pool_put(queue->ib_cq, queue->cq_size);  in nvme_rdma_free_cq()
    475  queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,  in nvme_rdma_create_cq()
    478  queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,  in nvme_rdma_create_cq()
    481  if (IS_ERR(queue->ib_cq)) {  in nvme_rdma_create_cq()
    482  ret = PTR_ERR(queue->ib_cq);  in nvme_rdma_create_cq()
    [all …]
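
nvme_rdma_create_cq()/nvme_rdma_free_cq() above choose between a dedicated CQ (ib_alloc_cq() with IB_POLL_DIRECT for polled queues) and a shared one from the per-device pool (ib_cq_pool_get()/ib_cq_pool_put()). A hedged sketch of that pairing; the use_polling flag and the sizes are illustrative:

#include <rdma/ib_verbs.h>

static struct ib_cq *my_get_cq(struct ib_device *dev, void *priv,
			       int cq_size, int comp_vector, bool use_polling)
{
	if (use_polling)
		/* dedicated CQ, completions reaped by the caller's poll loop */
		return ib_alloc_cq(dev, priv, cq_size, comp_vector,
				   IB_POLL_DIRECT);

	/* shared CQ from the per-device pool, sized by nr_cqe and poll context */
	return ib_cq_pool_get(dev, cq_size, comp_vector, IB_POLL_SOFTIRQ);
}

static void my_put_cq(struct ib_cq *cq, int cq_size, bool use_polling)
{
	if (use_polling)
		ib_free_cq(cq);
	else
		ib_cq_pool_put(cq, cq_size);	/* returns it to the pool */
}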

/linux/drivers/infiniband/hw/cxgb4/
  iw_cxgb4.h
    423  struct ib_cq ibcq;
    434  static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)  in to_c4iw_cq()
    990  int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
   1012  int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
   1014  int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
   1016  int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
   1072  int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);

/linux/drivers/infiniband/ulp/srp/
  ib_srp.h
    157  struct ib_cq *send_cq;
    158  struct ib_cq *recv_cq;

/linux/drivers/infiniband/hw/mthca/
  mthca_provider.h
    184  struct ib_cq ibcq;
    301  static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)  in to_mcq()

/linux/drivers/infiniband/hw/mana/
  cq.c
      8  int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,  in mana_ib_create_cq()
     88  int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)  in mana_ib_destroy_cq()