// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/auxiliary_bus.h>

#include <rdma/ib_verbs.h>

#include "bng_res.h"
#include "bng_fw.h"
#include "bnge.h"
#include "bnge_auxr.h"
#include "bng_re.h"
#include "bnge_hwrm.h"

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@broadcom.com>");
MODULE_DESCRIPTION(BNG_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");

static struct bng_re_dev *bng_re_dev_add(struct auxiliary_device *adev,
					 struct bnge_auxr_dev *aux_dev)
{
	struct bng_re_dev *rdev;

	/* Allocate bng_re_dev instance */
	rdev = ib_alloc_device(bng_re_dev, ibdev);
	if (!rdev) {
		pr_err("%s: bng_re_dev allocation failure!\n", KBUILD_MODNAME);
		return NULL;
	}

	/* Assign auxiliary device specific data */
	rdev->netdev = aux_dev->net;
	rdev->aux_dev = aux_dev;
	rdev->adev = adev;
	rdev->fn_id = rdev->aux_dev->pdev->devfn;

	return rdev;
}

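/*
 * Register this RoCE instance with the bnge netdev driver over the
 * auxiliary device interface.
 */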
static int bng_re_register_netdev(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev;

	aux_dev = rdev->aux_dev;
	return bnge_register_dev(aux_dev, rdev->adev);
}

static void bng_re_destroy_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;

	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->bng_res.cctx = NULL;
	rdev->bng_res.pdev = NULL;
	kfree(chip_ctx);
}

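/*
 * Populate the per-device chip context (chip number and HW stats size
 * reported by the bnge auxiliary device) and wire it into bng_res/rcfw.
 */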
static int bng_re_setup_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;
	struct bnge_auxr_dev *aux_dev;

	aux_dev = rdev->aux_dev;
	rdev->bng_res.pdev = aux_dev->pdev;
	rdev->rcfw.res = &rdev->bng_res;
	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = aux_dev->chip_num;
	chip_ctx->hw_stats_size = aux_dev->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	rdev->bng_res.cctx = rdev->chip_ctx;

	return 0;
}

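/*
 * Initialize a HWRM request header with the given opcode; the completion
 * ring and target IDs are left at their default value of -1.
 */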
static void bng_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(-1);
	hdr->target_id = cpu_to_le16(-1);
}

static void bng_re_fill_fw_msg(struct bnge_fw_msg *fw_msg, void *msg,
			       int msg_len, void *resp, int resp_max_len,
			       int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

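/*
 * Free a firmware ring through the bnge driver by sending a
 * HWRM_RING_FREE request for the given ring ID and ring type.
 */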
static int bng_re_net_ring_free(struct bng_re_dev *rdev,
				u16 fw_ring_id, int type)
{
	struct hwrm_ring_free_input req = {};
	struct hwrm_ring_free_output resp;
	struct bnge_fw_msg fw_msg = {};
	struct bnge_auxr_dev *aux_dev;
	int rc;

	if (!rdev || !rdev->aux_dev)
		return -EINVAL;

	aux_dev = rdev->aux_dev;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  fw_ring_id, rc);
	return rc;
}

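/*
 * Allocate a firmware ring through the bnge driver by sending a
 * HWRM_RING_ALLOC request built from the supplied ring attributes.
 * On success the firmware-assigned ring ID is returned in *fw_ring_id.
 */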
static int bng_re_net_ring_alloc(struct bng_re_dev *rdev,
				 struct bng_re_ring_attr *ring_attr,
				 u16 *fw_ring_id)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ring_alloc_input req = {};
	struct hwrm_ring_alloc_output resp;
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNGE_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

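/*
 * Query the HWRM interface version from firmware, pack it into a single
 * 64-bit value in the chip context and cache the maximum command timeout,
 * falling back to BNG_ROCE_FW_MAX_TIMEOUT if firmware reports none.
 */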
static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ver_get_output ver_get_resp = {};
	struct hwrm_ver_get_input ver_get_req = {};
	struct bng_re_chip_ctx *cctx;
	struct bnge_fw_msg fw_msg = {};
	int rc;

	bng_re_init_hwrm_hdr((void *)&ver_get_req, HWRM_VER_GET);
	ver_get_req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	ver_get_req.hwrm_intf_min = HWRM_VERSION_MINOR;
	ver_get_req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bng_re_fill_fw_msg(&fw_msg, (void *)&ver_get_req, sizeof(ver_get_req),
			   (void *)&ver_get_resp, sizeof(ver_get_resp),
			   BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}

	cctx = rdev->chip_ctx;
	cctx->hwrm_intf_ver =
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_build) << 16 |
		le16_to_cpu(ver_get_resp.hwrm_intf_patch);

	cctx->hwrm_cmd_max_timeout = le16_to_cpu(ver_get_resp.max_req_timeout);

	if (!cctx->hwrm_cmd_max_timeout)
		cctx->hwrm_cmd_max_timeout = BNG_ROCE_FW_MAX_TIMEOUT;
}

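/*
 * Tear down the device in roughly the reverse order of bng_re_dev_init():
 * disable the RCFW channel, free the CREQ firmware ring and RCFW
 * resources, release the NQ record and chip context, and finally
 * unregister from the bnge netdev driver.
 */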
static void bng_re_dev_uninit(struct bng_re_dev *rdev)
{
	bng_re_disable_rcfw_channel(&rdev->rcfw);
	bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id,
			     RING_ALLOC_REQ_RING_TYPE_NQ);
	bng_re_free_rcfw_channel(&rdev->rcfw);

	kfree(rdev->nqr);
	rdev->nqr = NULL;
	bng_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
		bnge_unregister_dev(rdev->aux_dev);
}

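/*
 * Bring up the device: register with the bnge netdev driver, validate the
 * reserved MSI-X vectors, set up the chip context, query the firmware
 * version, allocate the RCFW channel and its CREQ firmware ring, and
 * enable the RCFW channel on the CREQ MSI-X vector.
 */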
static int bng_re_dev_init(struct bng_re_dev *rdev)
{
	struct bng_re_ring_attr rattr = {};
	struct bng_re_creq_ctx *creq;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with the netdev driver */
	rc = bng_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}

	set_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	if (rdev->aux_dev->auxr_info->msix_requested < BNG_RE_MIN_MSIX) {
		ibdev_err(&rdev->ibdev,
			  "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
			  rdev->aux_dev->auxr_info->msix_requested);
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -EINVAL;
	}
	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
		  rdev->aux_dev->auxr_info->msix_requested);

	rc = bng_re_setup_chip_ctx(rdev);
	if (rc) {
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	bng_re_query_hwrm_version(rdev);

	rc = bng_re_alloc_fw_channel(&rdev->bng_res, &rdev->rcfw);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	/* Allocate NQ record memory */
	rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
	if (!rdev->nqr) {
		bng_re_destroy_chip_ctx(rdev);
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -ENOMEM;
	}

	rdev->nqr->num_msix = rdev->aux_dev->auxr_info->msix_requested;
	memcpy(rdev->nqr->msix_entries, rdev->aux_dev->msix_info,
	       sizeof(struct bnge_msix_info) * rdev->nqr->num_msix);

	type = RING_ALLOC_REQ_RING_TYPE_NQ;
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNG_FW_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].ring_idx;
	rc = bng_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].db_offset;
	vid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].vector;

	rc = bng_re_enable_fw_channel(&rdev->rcfw, vid, db_offt);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	return 0;
free_ring:
	bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bng_re_free_rcfw_channel(&rdev->rcfw);
fail:
	bng_re_dev_uninit(rdev);
	return rc;
}

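/*
 * Allocate a bng_re_dev for the auxiliary device, link it into the
 * per-device info and initialize it; the ib_device is freed on failure.
 */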
static int bng_re_add_device(struct auxiliary_device *adev)
{
	struct bnge_auxr_priv *auxr_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *dev_info;
	struct bng_re_dev *rdev;
	int rc;

	dev_info = auxiliary_get_drvdata(adev);

	rdev = bng_re_dev_add(adev, auxr_priv->auxr_dev);
	if (!rdev) {
		rc = -ENOMEM;
		goto exit;
	}

	dev_info->rdev = rdev;

	rc = bng_re_dev_init(rdev);
	if (rc)
		goto re_dev_dealloc;

	return 0;

re_dev_dealloc:
	ib_dealloc_device(&rdev->ibdev);
exit:
	return rc;
}

static void bng_re_remove_device(struct bng_re_dev *rdev,
				 struct auxiliary_device *aux_dev)
{
	bng_re_dev_uninit(rdev);
	ib_dealloc_device(&rdev->ibdev);
}

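/*
 * Auxiliary bus probe: allocate the per-device info, stash it as driver
 * data and bring up the RoCE device; free the info again on failure.
 */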
static int bng_re_probe(struct auxiliary_device *adev,
			const struct auxiliary_device_id *id)
{
	struct bnge_auxr_priv *aux_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *en_info;
	int rc;

	en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
	if (!en_info)
		return -ENOMEM;

	en_info->auxr_dev = aux_priv->auxr_dev;

	auxiliary_set_drvdata(adev, en_info);

	rc = bng_re_add_device(adev);
	if (rc)
		kfree(en_info);

	return rc;
}

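/*
 * Auxiliary bus remove: tear down the RoCE device, if one was created,
 * and free the per-device info.
 */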
static void bng_re_remove(struct auxiliary_device *adev)
{
	struct bng_re_en_dev_info *dev_info = auxiliary_get_drvdata(adev);
	struct bng_re_dev *rdev;

	rdev = dev_info->rdev;

	if (rdev)
		bng_re_remove_device(rdev, adev);
	kfree(dev_info);
}

static const struct auxiliary_device_id bng_re_id_table[] = {
	{ .name = BNG_RE_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, bng_re_id_table);

static struct auxiliary_driver bng_re_driver = {
	.name = "rdma",
	.probe = bng_re_probe,
	.remove = bng_re_remove,
	.id_table = bng_re_id_table,
};

static int __init bng_re_mod_init(void)
{
	int rc;

	rc = auxiliary_driver_register(&bng_re_driver);
	if (rc) {
		pr_err("%s: Failed to register auxiliary driver\n",
		       KBUILD_MODNAME);
	}
	return rc;
}

static void __exit bng_re_mod_exit(void)
{
	auxiliary_driver_unregister(&bng_re_driver);
}

module_init(bng_re_mod_init);
module_exit(bng_re_mod_exit);