// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

/* Maximum number of scatter/gather entries accepted per work request */
#define MAX_WR_SGL_NUM (2)

/* Post one receive WR on a UD/GSI QP and track it in the shadow RQ. */
static int mana_ib_post_recv_ud(struct mana_ib_qp *qp, const struct ib_recv_wr *wr)
{
	struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
	struct gdma_posted_wqe_info wqe_info = {0};
	struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM];
	struct gdma_wqe_request wqe_req = {0};
	struct ud_rq_shadow_wqe *shadow_wqe;
	int err, i;

	if (shadow_queue_full(&qp->shadow_rq))
		return -EINVAL;

	if (wr->num_sge > MAX_WR_SGL_NUM)
		return -EINVAL;

	for (i = 0; i < wr->num_sge; ++i) {
		gdma_sgl[i].address = wr->sg_list[i].addr;
		gdma_sgl[i].mem_key = wr->sg_list[i].lkey;
		gdma_sgl[i].size = wr->sg_list[i].length;
	}
	wqe_req.num_sge = wr->num_sge;
	wqe_req.sgl = gdma_sgl;

	err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
	if (err)
		return err;

	/* Record the posted WQE so the completion path can report it. */
	shadow_wqe = shadow_queue_producer_entry(&qp->shadow_rq);
	memset(shadow_wqe, 0, sizeof(*shadow_wqe));
	shadow_wqe->header.opcode = IB_WC_RECV;
	shadow_wqe->header.wr_id = wr->wr_id;
	shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
	shadow_queue_advance_producer(&qp->shadow_rq);

	mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
	return 0;
}

int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		switch (ibqp->qp_type) {
		case IB_QPT_UD:
		case IB_QPT_GSI:
			err = mana_ib_post_recv_ud(qp, wr);
			if (unlikely(err)) {
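				/* Point the caller at the failing WR so it can resume the chain. */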
				*bad_wr = wr;
				return err;
			}
			break;
		default:
			ibdev_dbg(ibqp->device, "Posting recv wr on qp type %u is not supported\n",
				  ibqp->qp_type);
			*bad_wr = wr;
			return -EINVAL;
		}
	}

	return err;
}

/* Post one UD send WR: the address vector goes in SGE 0, payload SGEs follow. */
static int mana_ib_post_send_ud(struct mana_ib_qp *qp, const struct ib_ud_wr *wr)
{
	struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_ib_ah *ah = container_of(wr->ah, struct mana_ib_ah, ibah);
	struct net_device *ndev = mana_ib_get_netdev(&mdev->ib_dev, qp->port);
	struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
	struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM + 1];
	struct gdma_posted_wqe_info wqe_info = {0};
	struct gdma_wqe_request wqe_req = {0};
	struct rdma_send_oob send_oob = {0};
	struct ud_sq_shadow_wqe *shadow_wqe;
	int err, i;

	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
			  qp->port, qp->ibqp.qp_num);
		return -EINVAL;
	}

	if (wr->wr.opcode != IB_WR_SEND)
		return -EINVAL;

	if (shadow_queue_full(&qp->shadow_sq))
		return -EINVAL;

	if (wr->wr.num_sge > MAX_WR_SGL_NUM)
		return -EINVAL;

	/* SGE 0 carries the DMA-mapped address vector for this destination. */
	gdma_sgl[0].address = ah->dma_handle;
	gdma_sgl[0].mem_key = qp->ibqp.pd->local_dma_lkey;
	gdma_sgl[0].size = sizeof(struct mana_ib_av);
	for (i = 0; i < wr->wr.num_sge; ++i) {
		gdma_sgl[i + 1].address = wr->wr.sg_list[i].addr;
		gdma_sgl[i + 1].mem_key = wr->wr.sg_list[i].lkey;
		gdma_sgl[i + 1].size = wr->wr.sg_list[i].length;
	}

	wqe_req.num_sge = wr->wr.num_sge + 1;
	wqe_req.sgl = gdma_sgl;
	wqe_req.inline_oob_size = sizeof(struct rdma_send_oob);
	wqe_req.inline_oob_data = &send_oob;
	wqe_req.flags = GDMA_WR_OOB_IN_SGL;
	/* Round the netdev MTU down to the nearest valid IB MTU. */
	wqe_req.client_data_unit = ib_mtu_enum_to_int(ib_mtu_int_to_enum(ndev->mtu));

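	/* Build the inline OOB that describes the UD send to the hardware. */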
	send_oob.wqe_type = WQE_TYPE_UD_SEND;
	send_oob.fence = !!(wr->wr.send_flags & IB_SEND_FENCE);
	send_oob.signaled = !!(wr->wr.send_flags & IB_SEND_SIGNALED);
	send_oob.solicited = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	send_oob.psn = qp->ud_qp.sq_psn;
	send_oob.ssn_or_rqpn = wr->remote_qpn;
	send_oob.ud_send.remote_qkey =
		qp->ibqp.qp_type == IB_QPT_GSI ? IB_QP1_QKEY : wr->remote_qkey;

	err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
	if (err)
		return err;

	qp->ud_qp.sq_psn++;
	/* Record the posted WQE so the completion path can report it. */
	shadow_wqe = shadow_queue_producer_entry(&qp->shadow_sq);
	memset(shadow_wqe, 0, sizeof(*shadow_wqe));
	shadow_wqe->header.opcode = IB_WC_SEND;
	shadow_wqe->header.wr_id = wr->wr.wr_id;
	shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
	shadow_queue_advance_producer(&qp->shadow_sq);

	mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
	return 0;
}

int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	int err = 0;

	for (; wr; wr = wr->next) {
		switch (ibqp->qp_type) {
		case IB_QPT_UD:
		case IB_QPT_GSI:
			err = mana_ib_post_send_ud(qp, ud_wr(wr));
			if (unlikely(err)) {
				*bad_wr = wr;
				return err;
			}
			break;
		default:
			ibdev_dbg(ibqp->device, "Posting send wr on qp type %u is not supported\n",
				  ibqp->qp_type);
			*bad_wr = wr;
			return -EINVAL;
		}
	}

	return err;
}
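
/*
 * Illustrative sketch (not compiled into the driver): how an in-kernel
 * consumer might chain two receive WRs through the verbs entry point
 * above. The QP, buffer addresses, length and lkey are hypothetical
 * placeholders supplied by the caller.
 */
#if 0
static int example_post_recv_chain(struct ib_qp *qp, u64 buf0, u64 buf1,
				   u32 len, u32 lkey)
{
	const struct ib_recv_wr *bad_wr;
	struct ib_sge sge1 = { .addr = buf1, .length = len, .lkey = lkey };
	struct ib_sge sge0 = { .addr = buf0, .length = len, .lkey = lkey };
	struct ib_recv_wr wr1 = { .wr_id = 2, .sg_list = &sge1, .num_sge = 1 };
	struct ib_recv_wr wr0 = { .wr_id = 1, .next = &wr1,
				  .sg_list = &sge0, .num_sge = 1 };

	/* On failure, bad_wr points at the first WR that was not posted. */
	return ib_post_recv(qp, &wr0, &bad_wr);
}
#endif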