| /linux/drivers/infiniband/hw/mthca/ |
| mthca_srq.c |
     74  static void *get_wqe(struct mthca_srq *srq, int n)  in get_wqe() argument
     76  if (srq->is_direct)  in get_wqe()
     77  return srq->queue.direct.buf + (n << srq->wqe_shift);  in get_wqe()
     79  return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +  in get_wqe()
     80  ((n << srq->wqe_shift) & (PAGE_SIZE - 1));  in get_wqe()
     99  struct mthca_srq *srq,  in mthca_tavor_init_srq_context() argument
    108  context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));  in mthca_tavor_init_srq_context()
    110  context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);  in mthca_tavor_init_srq_context()
    120  struct mthca_srq *srq,  in mthca_arbel_init_srq_context() argument
    134  max = srq->max;  in mthca_arbel_init_srq_context()
    [all …]
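The get_wqe() lines above split a WQE index into a page-list entry and an intra-page offset whenever the queue buffer is not physically contiguous. A minimal userspace sketch of the same arithmetic follows; the wqe_shift values and the 4 KiB page size are assumptions for illustration, not the driver's actual configuration.

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirror of the mthca offset math: byte offset = n << wqe_shift,
 * then split into a page index and an offset within that page. */
static void wqe_location(int n, int wqe_shift)
{
	unsigned long off = (unsigned long)n << wqe_shift;

	printf("wqe %d: page %lu, offset 0x%lx\n",
	       n, off >> PAGE_SHIFT, off & (PAGE_SIZE - 1));
}

int main(void)
{
	wqe_location(0, 6);	/* 64-byte WQEs, first entry */
	wqe_location(70, 6);	/* lands in the second 4 KiB page */
	return 0;
}
```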
|
| mthca_dev.h |
    248  struct mthca_array srq;  member
    496  struct mthca_srq *srq);
    502  struct ib_srq_attr *attr, struct mthca_srq *srq,
    504  void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
    507  int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
    511  void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
    512  int mthca_tavor_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
    514  int mthca_arbel_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
|
| /linux/drivers/infiniband/hw/hns/ |
| hns_roce_srq.c |
     15  struct hns_roce_srq *srq;  in hns_roce_srq_event() local
     18  srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));  in hns_roce_srq_event()
     19  if (srq)  in hns_roce_srq_event()
     20  refcount_inc(&srq->refcount);  in hns_roce_srq_event()
     23  if (!srq) {  in hns_roce_srq_event()
     28  srq->event(srq, event_type);  in hns_roce_srq_event()
     30  if (refcount_dec_and_test(&srq->refcount))  in hns_roce_srq_event()
     31  complete(&srq->free);  in hns_roce_srq_event()
     34  static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,  in hns_roce_ib_srq_event() argument
     37  struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);  in hns_roce_ib_srq_event()
    [all …]
|
| hns_roce_hw_v2.c |
    879  static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)  in get_srq_wqe_buf() argument
    881  return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);  in get_srq_wqe_buf()
    890  static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)  in hns_roce_free_srq_wqe() argument
    893  spin_lock(&srq->lock);  in hns_roce_free_srq_wqe()
    895  bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);  in hns_roce_free_srq_wqe()
    896  srq->idx_que.tail++;  in hns_roce_free_srq_wqe()
    898  spin_unlock(&srq->lock);  in hns_roce_free_srq_wqe()
    901  static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)  in hns_roce_srqwq_overflow() argument
    903  struct hns_roce_idx_que *idx_que = &srq->idx_que;  in hns_roce_srqwq_overflow()
    905  return idx_que->head - idx_que->tail >= srq->wqe_cnt;  in hns_roce_srqwq_overflow()
    [all …]
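hns_roce_srqwq_overflow() relies on free-running unsigned head/tail counters: the index queue is full once head - tail reaches wqe_cnt, and unsigned modular arithmetic keeps the subtraction correct across wraparound. A small standalone illustration; the 32-bit counter width is an assumption of the sketch.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct idx_que {
	uint32_t head;	/* incremented when a WQE is posted */
	uint32_t tail;	/* incremented when a WQE is consumed */
};

static bool srqwq_overflow(const struct idx_que *q, uint32_t wqe_cnt)
{
	/* Valid even when head has wrapped past UINT32_MAX, because the
	 * difference is computed modulo 2^32. */
	return q->head - q->tail >= wqe_cnt;
}

int main(void)
{
	struct idx_que q = { .head = UINT32_MAX, .tail = UINT32_MAX - 3 };

	printf("in use: %u, full: %d\n", q.head - q.tail, srqwq_overflow(&q, 4));
	q.head++;		/* head wraps around to 0 */
	printf("in use: %u, full: %d\n", q.head - q.tail, srqwq_overflow(&q, 4));
	return 0;
}
```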
|
| /linux/drivers/infiniband/hw/mlx4/ |
| srq.c |
     42  static void *get_wqe(struct mlx4_ib_srq *srq, int n)  in get_wqe() argument
     44  return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);  in get_wqe()
     47  static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)  in mlx4_ib_srq_event() argument
     50  struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;  in mlx4_ib_srq_event()
     54  event.element.srq = ibsrq;  in mlx4_ib_srq_event()
     64  "on SRQ %06x\n", type, srq->srqn);  in mlx4_ib_srq_event()
     79  struct mlx4_ib_srq *srq = to_msrq(ib_srq);  in mlx4_ib_create_srq() local
     98  mutex_init(&srq->mutex);  in mlx4_ib_create_srq()
     99  spin_lock_init(&srq->lock);  in mlx4_ib_create_srq()
    100  srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);  in mlx4_ib_create_srq()
    [all …]
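mlx4_ib_create_srq() rounds the requested depth (max_wr + 1) up to a power of two so that queue indices can be handled with simple masking. A userspace stand-in for the kernel's roundup_pow_of_two(), shown only to illustrate the sizing; the requested depth below is an invented example value.

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace equivalent of roundup_pow_of_two() for SRQ-sized values
 * (n >= 1 and well within 32 bits). */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
	uint32_t v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

int main(void)
{
	uint32_t max_wr = 1000;	/* depth requested by the consumer */
	uint32_t depth = roundup_pow_of_two32(max_wr + 1);

	printf("max_wr %u -> SRQ depth %u\n", max_wr, depth);	/* prints 1024 */
	return 0;
}
```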
|
| /linux/drivers/infiniband/hw/mlx5/ |
| srq.c |
     13  static void *get_wqe(struct mlx5_ib_srq *srq, int n)  in get_wqe() argument
     15  return mlx5_frag_buf_get_wqe(&srq->fbc, n);  in get_wqe()
     18  static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)  in mlx5_ib_srq_event() argument
     21  struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;  in mlx5_ib_srq_event()
     25  event.element.srq = ibsrq;  in mlx5_ib_srq_event()
     35  type, srq->srqn);  in mlx5_ib_srq_event()
     43  static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,  in create_srq_user() argument
     76  srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);  in create_srq_user()
     78  srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);  in create_srq_user()
     79  if (IS_ERR(srq->umem)) {  in create_srq_user()
    [all …]
|
| srq_cmd.c |
     84  struct mlx5_core_srq *srq;  in mlx5_cmd_get_srq() local
     87  srq = xa_load(&table->array, srqn);  in mlx5_cmd_get_srq()
     88  if (srq)  in mlx5_cmd_get_srq()
     89  refcount_inc(&srq->common.refcount);  in mlx5_cmd_get_srq()
     92  return srq;  in mlx5_cmd_get_srq()
    114  static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,  in create_srq_cmd() argument
    157  srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);  in create_srq_cmd()
    158  srq->uid = in->uid;  in create_srq_cmd()
    164  static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)  in destroy_srq_cmd() argument
    169  MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);  in destroy_srq_cmd()
    [all …]
|
| srq.h |
     48  void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
     58  int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
     60  int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
     61  int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
     63  int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
| cq.c |
    174  struct mlx5_ib_srq *srq = NULL;  in handle_responder() local
    181  if (qp->ibqp.srq || qp->ibqp.xrcd) {  in handle_responder()
    187  srq = to_mibsrq(msrq);  in handle_responder()
    189  srq = to_msrq(qp->ibqp.srq);  in handle_responder()
    191  if (srq) {  in handle_responder()
    193  wc->wr_id = srq->wrid[wqe_ctr];  in handle_responder()
    194  mlx5_ib_free_srq_wqe(srq, wqe_ctr);  in handle_responder()
    543  struct mlx5_ib_srq *srq;  in mlx5_poll_one() local
    545  if ((*cur_qp)->ibqp.srq) {  in mlx5_poll_one()
    546  srq = to_msrq((*cur_qp)->ibqp.srq);  in mlx5_poll_one()
    [all …]
|
| /linux/drivers/infiniband/sw/rxe/ |
| rxe_srq.c |
     44  int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,  in rxe_srq_from_init() argument
     52  srq->ibsrq.event_handler = init->event_handler;  in rxe_srq_from_init()
     53  srq->ibsrq.srq_context = init->srq_context;  in rxe_srq_from_init()
     54  srq->limit = init->attr.srq_limit;  in rxe_srq_from_init()
     55  srq->srq_num = srq->elem.index;  in rxe_srq_from_init()
     56  srq->rq.max_wr = init->attr.max_wr;  in rxe_srq_from_init()
     57  srq->rq.max_sge = init->attr.max_sge;  in rxe_srq_from_init()
     60  srq->rq.max_sge*sizeof(struct ib_sge);  in rxe_srq_from_init()
     62  spin_lock_init(&srq->rq.producer_lock);  in rxe_srq_from_init()
     63  spin_lock_init(&srq->rq.consumer_lock);  in rxe_srq_from_init()
    [all …]
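rxe sizes each receive WQE as a fixed header plus one struct ib_sge per supported scatter entry (the truncated line 60 shows the max_sge term of that sum). A toy version of the per-WQE footprint calculation; the header size and the attribute values are placeholders, not rxe's real struct rxe_recv_wqe layout.

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct ib_sge: address, length, local key. */
struct sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

int main(void)
{
	size_t hdr = 64;		/* placeholder WQE header size */
	unsigned int max_sge = 4;	/* scatter entries per receive */
	unsigned int max_wr = 256;	/* queue depth */
	size_t wqe_size = hdr + max_sge * sizeof(struct sge);

	printf("per-WQE %zu bytes, queue body ~%zu bytes\n",
	       wqe_size, (size_t)max_wr * wqe_size);
	return 0;
}
```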
|
| rxe_qp.c |
    128  if (rxe_qp_chk_cap(rxe, cap, !!init->srq))  in rxe_qp_chk_init()
    382  if (!qp->srq) {  in rxe_qp_init_resp()
    406  struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;  in rxe_qp_from_init() local
    412  if (srq)  in rxe_qp_from_init()
    413  rxe_get(srq);  in rxe_qp_from_init()
    418  qp->srq = srq;  in rxe_qp_from_init()
    450  qp->srq = NULL;  in rxe_qp_from_init()
    452  if (srq)  in rxe_qp_from_init()
    453  rxe_put(srq);  in rxe_qp_from_init()
    468  init->srq = qp->ibqp.srq;  in rxe_qp_to_init()
    [all …]
|
| rxe_resp.c |
    261  struct rxe_srq *srq = qp->srq;  in get_srq_wqe() local
    262  struct rxe_queue *q = srq->rq.queue;  in get_srq_wqe()
    269  if (srq->error)  in get_srq_wqe()
    272  spin_lock_irqsave(&srq->rq.consumer_lock, flags);  in get_srq_wqe()
    276  spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);  in get_srq_wqe()
    281  if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {  in get_srq_wqe()
    282  spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);  in get_srq_wqe()
    293  if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {  in get_srq_wqe()
    294  srq->limit = 0;  in get_srq_wqe()
    298  spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);  in get_srq_wqe()
    [all …]
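Lines 293-294 show the SRQ limit-event convention: once the number of available WQEs drops below the armed limit, the limit is cleared (disarmed) so the consumer is notified at most once per arming; the event call itself (IB_EVENT_SRQ_LIMIT_REACHED in the verbs API) is not in the excerpt. A compact, driver-independent sketch of that one-shot arming logic:

```c
#include <stdio.h>
#include <stdbool.h>

struct toy_srq {
	unsigned int limit;	/* 0 means disarmed */
	unsigned int count;	/* WQEs still available to consume */
};

/* Consume one WQE; return true if a limit event should be delivered. */
static bool consume_wqe(struct toy_srq *srq)
{
	if (srq->count)
		srq->count--;

	if (srq->limit && srq->count < srq->limit) {
		srq->limit = 0;	/* disarm: fire at most once per arming */
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_srq srq = { .limit = 3, .count = 4 };

	for (int i = 0; i < 4; i++)
		printf("consume %d -> limit event: %d\n", i, consume_wqe(&srq));
	return 0;
}
```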
|
| /linux/drivers/infiniband/sw/rdmavt/ |
| srq.c |
     38  struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);  in rvt_create_srq() local
     54  srq->rq.size = srq_init_attr->attr.max_wr + 1;  in rvt_create_srq()
     55  srq->rq.max_sge = srq_init_attr->attr.max_sge;  in rvt_create_srq()
     56  sz = sizeof(struct ib_sge) * srq->rq.max_sge +  in rvt_create_srq()
     58  if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,  in rvt_create_srq()
     69  u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;  in rvt_create_srq()
     71  srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);  in rvt_create_srq()
     72  if (IS_ERR(srq->ip)) {  in rvt_create_srq()
     73  ret = PTR_ERR(srq->ip);  in rvt_create_srq()
     77  ret = ib_copy_to_udata(udata, &srq->ip->offset,  in rvt_create_srq()
    [all …]
|
| /linux/drivers/infiniband/hw/vmw_pvrdma/ |
| pvrdma_srq.c |
     65  struct pvrdma_srq *srq = to_vsrq(ibsrq);  in pvrdma_query_srq() local
     74  cmd->srq_handle = srq->srq_handle;  in pvrdma_query_srq()
    102  struct pvrdma_srq *srq = to_vsrq(ibsrq);  in pvrdma_create_srq() local
    137  spin_lock_init(&srq->lock);  in pvrdma_create_srq()
    138  refcount_set(&srq->refcnt, 1);  in pvrdma_create_srq()
    139  init_completion(&srq->free);  in pvrdma_create_srq()
    149  srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);  in pvrdma_create_srq()
    150  if (IS_ERR(srq->umem)) {  in pvrdma_create_srq()
    151  ret = PTR_ERR(srq->umem);  in pvrdma_create_srq()
    155  srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);  in pvrdma_create_srq()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| srq.c |
     46  struct mlx4_srq *srq;  in mlx4_srq_event() local
     49  srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));  in mlx4_srq_event()
     51  if (srq)  in mlx4_srq_event()
     52  refcount_inc(&srq->refcount);  in mlx4_srq_event()
     58  srq->event(srq, event_type);  in mlx4_srq_event()
     60  if (refcount_dec_and_test(&srq->refcount))  in mlx4_srq_event()
     61  complete(&srq->free);  in mlx4_srq_event()
    163  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)  in mlx4_srq_alloc() argument
    171  err = mlx4_srq_alloc_icm(dev, &srq->srqn);  in mlx4_srq_alloc()
    176  err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);  in mlx4_srq_alloc()
    [all …]
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| t4.h |
    425  static inline u32 t4_srq_avail(struct t4_srq *srq)  in t4_srq_avail() argument
    427  return srq->size - 1 - srq->in_use;  in t4_srq_avail()
    430  static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)  in t4_srq_produce() argument
    432  srq->in_use++;  in t4_srq_produce()
    433  if (++srq->pidx == srq->size)  in t4_srq_produce()
    434  srq->pidx = 0;  in t4_srq_produce()
    435  srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);  in t4_srq_produce()
    436  if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)  in t4_srq_produce()
    437  srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;  in t4_srq_produce()
    438  srq->queue[srq->size].status.host_pidx = srq->pidx;  in t4_srq_produce()
    [all …]
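t4_srq_produce() advances two cursors at once: the WQE producer index (pidx, wrapping at size) and a hardware slot index (wq_pidx) that moves in fixed-size slots, with the WR length supplied in 16-byte units. A standalone rendering of that arithmetic; the slot size and slots-per-entry constants below are placeholders standing in for T4_EQ_ENTRY_SIZE and T4_RQ_NUM_SLOTS, not necessarily the driver's actual values.

```c
#include <stdio.h>

#define EQ_ENTRY_SIZE	64	/* placeholder hardware slot size, bytes */
#define RQ_NUM_SLOTS	2	/* placeholder slots per SRQ entry */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct toy_t4_srq {
	unsigned int size;	/* number of WQEs */
	unsigned int in_use;
	unsigned int pidx;	/* WQE producer index */
	unsigned int wq_pidx;	/* hardware slot producer index */
};

static unsigned int srq_avail(const struct toy_t4_srq *srq)
{
	return srq->size - 1 - srq->in_use;
}

static void srq_produce(struct toy_t4_srq *srq, unsigned char len16)
{
	srq->in_use++;
	if (++srq->pidx == srq->size)
		srq->pidx = 0;
	srq->wq_pidx += DIV_ROUND_UP(len16 * 16, EQ_ENTRY_SIZE);
	if (srq->wq_pidx >= srq->size * RQ_NUM_SLOTS)
		srq->wq_pidx %= srq->size * RQ_NUM_SLOTS;
}

int main(void)
{
	struct toy_t4_srq srq = { .size = 8 };

	srq_produce(&srq, 4);	/* a 64-byte WR consumes one slot */
	printf("pidx %u wq_pidx %u avail %u\n",
	       srq.pidx, srq.wq_pidx, srq_avail(&srq));
	return 0;
}
```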
|
| cq.c |
    462  static void post_pending_srq_wrs(struct t4_srq *srq)  in post_pending_srq_wrs() argument
    467  while (srq->pending_in_use) {  in post_pending_srq_wrs()
    468  pwr = &srq->pending_wrs[srq->pending_cidx];  in post_pending_srq_wrs()
    469  srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;  in post_pending_srq_wrs()
    470  srq->sw_rq[srq->pidx].valid = 1;  in post_pending_srq_wrs()
    474  srq->cidx, srq->pidx, srq->wq_pidx,  in post_pending_srq_wrs()
    475  srq->in_use, srq->size,  in post_pending_srq_wrs()
    478  c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);  in post_pending_srq_wrs()
    479  t4_srq_consume_pending_wr(srq);  in post_pending_srq_wrs()
    480  t4_srq_produce(srq, pwr->len16);  in post_pending_srq_wrs()
    [all …]
|
| /linux/drivers/infiniband/core/ |
| uverbs_std_types_srq.c |
     14  struct ib_srq *srq = uobject->object;  in uverbs_free_srq() local
     17  enum ib_srq_type srq_type = srq->srq_type;  in uverbs_free_srq()
     20  ret = ib_destroy_srq_user(srq, &attrs->driver_udata);  in uverbs_free_srq()
     46  struct ib_srq *srq;  in UVERBS_HANDLER() local
    107  srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);  in UVERBS_HANDLER()
    108  if (IS_ERR(srq)) {  in UVERBS_HANDLER()
    109  ret = PTR_ERR(srq);  in UVERBS_HANDLER()
    113  obj->uevent.uobject.object = srq;  in UVERBS_HANDLER()
    131  &srq->ext.xrc.srq_num,  in UVERBS_HANDLER()
    132  sizeof(srq->ext.xrc.srq_num));  in UVERBS_HANDLER()
|
| verbs.c |
   1018  struct ib_srq *srq;  in ib_create_srq_user() local
   1021  srq = rdma_zalloc_drv_obj(pd->device, ib_srq);  in ib_create_srq_user()
   1022  if (!srq)  in ib_create_srq_user()
   1025  srq->device = pd->device;  in ib_create_srq_user()
   1026  srq->pd = pd;  in ib_create_srq_user()
   1027  srq->event_handler = srq_init_attr->event_handler;  in ib_create_srq_user()
   1028  srq->srq_context = srq_init_attr->srq_context;  in ib_create_srq_user()
   1029  srq->srq_type = srq_init_attr->srq_type;  in ib_create_srq_user()
   1030  srq->uobject = uobject;  in ib_create_srq_user()
   1032  if (ib_srq_has_cq(srq->srq_type)) {  in ib_create_srq_user()
    [all …]
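ib_create_srq_user() is the in-kernel verbs path that copies the init attributes onto the new SRQ object before handing it to the driver. Userspace consumers reach the same object through libibverbs; a minimal user-space analogue is sketched below, assuming a protection domain obtained elsewhere (e.g. from ibv_alloc_pd()) and with error handling trimmed. The attribute values are illustrative only.

```c
/* Build with: cc -c srq_demo.c  (link against -libverbs when used) */
#include <infiniband/verbs.h>
#include <string.h>

struct ibv_srq *create_basic_srq(struct ibv_pd *pd)
{
	struct ibv_srq_init_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_wr    = 256;	/* depth of the shared receive queue */
	attr.attr.max_sge   = 1;	/* one scatter entry per receive */
	attr.attr.srq_limit = 0;	/* limit event starts disarmed */

	return ibv_create_srq(pd, &attr);	/* NULL on failure */
}
```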
|
| /linux/drivers/infiniband/hw/bnxt_re/ |
| qplib_fp.c |
     86  if (!qp->srq) {  in __bnxt_qplib_add_flush_qp()
    133  if (!qp->srq) {  in __bnxt_qplib_del_flush_qp()
    358  struct bnxt_qplib_srq *srq;  in bnxt_qplib_service_nq() local
    366  srq = (struct bnxt_qplib_srq *)q_handle;  in bnxt_qplib_service_nq()
    367  srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)  in bnxt_qplib_service_nq()
    369  srq->dbinfo.toggle = srq->toggle;  in bnxt_qplib_service_nq()
    370  srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);  in bnxt_qplib_service_nq()
    372  *((u32 *)srq_p->uctx_srq_page) = srq->toggle;  in bnxt_qplib_service_nq()
    373  bnxt_qplib_armen_db(&srq->dbinfo,  in bnxt_qplib_service_nq()
    619  struct bnxt_qplib_srq *srq)  in bnxt_qplib_destroy_srq() argument
    [all …]
|
| ib_verbs.c |
   1143  if (!qp->qplib_qp.srq) {  in bnxt_re_init_user_qp()
   1298  if (init_attr->srq) {  in bnxt_re_init_rq_attr()
   1299  struct bnxt_re_srq *srq;  in bnxt_re_init_rq_attr() local
   1301  srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);  in bnxt_re_init_rq_attr()
   1302  qplqp->srq = &srq->qplib_srq;  in bnxt_re_init_rq_attr()
   1824  struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,  in bnxt_re_destroy_srq() local
   1826  struct bnxt_re_dev *rdev = srq->rdev;  in bnxt_re_destroy_srq()
   1827  struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;  in bnxt_re_destroy_srq()
   1830  free_page((unsigned long)srq->uctx_srq_page);  in bnxt_re_destroy_srq()
   1831  hash_del(&srq->hash_entry);  in bnxt_re_destroy_srq()
    [all …]
|
| /linux/drivers/infiniband/hw/irdma/ |
| uk.c |
    177  __le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)  in irdma_srq_get_next_recv_wqe() argument
    182  if (IRDMA_RING_FULL_ERR(srq->srq_ring))  in irdma_srq_get_next_recv_wqe()
    185  IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);  in irdma_srq_get_next_recv_wqe()
    190  srq->srwqe_polarity = !srq->srwqe_polarity;  in irdma_srq_get_next_recv_wqe()
    192  wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;  in irdma_srq_get_next_recv_wqe()
    423  int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,  in irdma_uk_srq_post_receive() argument
    431  if (srq->max_srq_frag_cnt < info->num_sges)  in irdma_uk_srq_post_receive()
    434  wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);  in irdma_uk_srq_post_receive()
    439  srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,  in irdma_uk_srq_post_receive()
    440  srq->srwqe_polarity);  in irdma_uk_srq_post_receive()
    [all …]
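The excerpt flips srwqe_polarity each time the ring head wraps back to index 0, and the current polarity is stamped into each posted WQE (line 440); in hardware ring designs of this kind, a consumer typically treats an entry as fresh only when its stamp matches the parity expected for the current lap, so stale entries from the previous pass are never misread. A self-contained illustration of that lap-parity idea; the ring size and field layout are invented for the sketch and do not reflect irdma's actual WQE format.

```c
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 4

struct toy_ring {
	unsigned int head;	/* next slot to fill */
	bool polarity;		/* flips every time head wraps to 0 */
	bool valid[RING_SIZE];	/* per-slot parity stamp written by producer */
};

static void post(struct toy_ring *r)
{
	r->valid[r->head] = r->polarity;	/* stamp with current lap parity */
	if (++r->head == RING_SIZE) {
		r->head = 0;
		r->polarity = !r->polarity;	/* new lap, new parity */
	}
}

/* A slot is fresh only if its stamp matches the parity the consumer expects. */
static bool is_fresh(const struct toy_ring *r, unsigned int idx, bool expect)
{
	return r->valid[idx] == expect;
}

int main(void)
{
	struct toy_ring r = { .polarity = true };

	for (int i = 0; i < 5; i++)	/* fill lap 1, then one entry of lap 2 */
		post(&r);
	printf("slot 0 fresh on lap 2: %d\n", is_fresh(&r, 0, false));
	printf("slot 1 fresh on lap 2: %d\n", is_fresh(&r, 1, false));
	return 0;
}
```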
|
| /linux/drivers/net/ |
| eql.c |
    264  static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
    265  static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
    416  slaving_request_t srq;  in eql_enslave() local
    418  if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))  in eql_enslave()
    421  slave_dev = __dev_get_by_name(&init_net, srq.slave_name);  in eql_enslave()
    436  s->priority = srq.priority;  in eql_enslave()
    437  s->priority_bps = srq.priority;  in eql_enslave()
    438  s->priority_Bps = srq.priority / 8;  in eql_enslave()
    458  slaving_request_t srq;  in eql_emancipate() local
    461  if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))  in eql_emancipate()
    [all …]
|
| /linux/drivers/infiniband/sw/siw/ |
| siw_qp_rx.c |
    333  struct siw_srq *srq;  in siw_rqe_get() local
    338  srq = qp->srq;  in siw_rqe_get()
    339  if (srq) {  in siw_rqe_get()
    340  spin_lock_irqsave(&srq->lock, flags);  in siw_rqe_get()
    341  if (unlikely(!srq->num_rqe))  in siw_rqe_get()
    344  rqe = &srq->recvq[srq->rq_get % srq->num_rqe];  in siw_rqe_get()
    378  if (srq)  in siw_rqe_get()
    379  spin_unlock_irqrestore(&srq->lock, flags);  in siw_rqe_get()
    382  if (!srq) {  in siw_rqe_get()
    385  if (srq->armed) {  in siw_rqe_get()
    [all …]
|
| /linux/drivers/infiniband/hw/ocrdma/ |
| ocrdma_hw.c |
    756  ib_evt.element.srq = &qp->srq->ibsrq;  in ocrdma_dispatch_ibevent()
    762  ib_evt.element.srq = &qp->srq->ibsrq;  in ocrdma_dispatch_ibevent()
    790  if (qp->srq->ibsrq.event_handler)  in ocrdma_dispatch_ibevent()
    791  qp->srq->ibsrq.event_handler(&ib_evt,  in ocrdma_dispatch_ibevent()
    792  qp->srq->ibsrq.  in ocrdma_dispatch_ibevent()
    916  if (qp->srq)  in _ocrdma_qp_buddy_cq_handler()
   2114  if (!qp->srq) {  in ocrdma_flush_qp()
   2357  if (!attrs->srq) {  in ocrdma_get_create_qp_rsp()
   2400  if (attrs->srq) {  in ocrdma_mbx_create_qp()
   2401  struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);  in ocrdma_mbx_create_qp() local
    [all …]
|