// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_full_qp_ctx {
		struct hns_roce_v2_qp_context qpc;
		struct hns_roce_v2_scc_context sccc;
	} context = {};
	u32 sccn = hr_qp->qpn;
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
	if (ret)
		return ret;

	/* If SCC is disabled or the query fails, the queried SCCC will
	 * be all 0.
	 */
	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
	    !hr_dev->hw->query_sccc)
		goto out;

	if (hr_qp->cong_type == CONG_TYPE_DIP) {
		if (!hr_qp->dip)
			goto out;
		sccn = hr_qp->dip->dip_idx;
	}

	ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
	if (ret)
		ibdev_warn_ratelimited(&hr_dev->ib_dev,
				       "failed to query SCCC, ret = %d.\n",
				       ret);

out:
	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}