// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
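
/*
 * The nested RDMA_NLDEV_ATTR_DRIVER attributes above surface in rdmatool's
 * detailed output, conventionally with a "drv_" prefix. A rough sketch of
 * what a query might look like (the device name and values below are made
 * up, and exact formatting depends on the rdmatool version):
 *
 *   $ rdma res show cq -dd
 *   dev hns_0 cqn 4 ... drv_cq_depth 1024 drv_cons_index 0
 *   drv_cqe_size 32 drv_arm_sn 1
 */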

int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return ret;

	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
}
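
/*
 * The context blob goes out as RDMA_NLDEV_ATTR_RES_RAW and is meant to be
 * treated by userspace as opaque, hardware-specific data. With a recent
 * rdmatool it can be fetched in raw mode, e.g. (a sketch; availability of
 * the flag depends on the iproute2 version):
 *
 *   $ rdma res show cq -r
 *
 * which dumps the struct hns_roce_v2_cq_context words as plain hex.
 */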

int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_v2_qp_context context;
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
	if (ret)
		return ret;

	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
}

int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return ret;

	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
}
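
/*
 * All four *_raw dumpers follow the same pattern: an optional query hook in
 * struct hns_roce_hw returns the hardware context of one resource. A sketch
 * of how a hw backend is expected to provide these hooks (the names below
 * follow the v2 backend in hns_roce_hw_v2.c, but treat the snippet as
 * illustrative rather than a verbatim copy):
 *
 *   static const struct hns_roce_hw hns_roce_hw_v2 = {
 *           ...
 *           .query_cqc = hns_roce_v2_query_cqc,
 *           .query_qpc = hns_roce_v2_query_qpc,
 *           .query_mpt = hns_roce_v2_query_mpt,
 *           .query_srqc = hns_roce_v2_query_srqc,
 *           ...
 *   };
 */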

int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
}
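
/*
 * These fill handlers are plugged into the core restrack machinery through
 * struct ib_device_ops and ib_set_device_ops(). A minimal sketch of the
 * registration (the ops-struct name here is illustrative; the hns driver
 * wires this up from its device registration path):
 *
 *   static const struct ib_device_ops hns_roce_dev_restrack_ops = {
 *           .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
 *           .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
 *           .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
 *           .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
 *           .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
 *           .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
 *           .fill_res_srq_entry = hns_roce_fill_res_srq_entry,
 *           .fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
 *   };
 *
 *   ib_set_device_ops(&hr_dev->ib_dev, &hns_roce_dev_restrack_ops);
 */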