xref: /linux/drivers/infiniband/hw/ionic/ionic_ibdev.c (revision e3c81bae4f282a6be56bc22e05e2ce3dd92ae301)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3 
4 #include <linux/module.h>
5 #include <linux/printk.h>
6 #include <linux/pci.h>
7 #include <linux/irq.h>
8 #include <net/addrconf.h>
9 #include <rdma/ib_addr.h>
10 #include <rdma/ib_mad.h>
11 
12 #include "ionic_ibdev.h"
13 
/* Human-readable strings used in MODULE_DESCRIPTION and the IB node_desc. */
#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"

MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
/* Symbols from the ionic Ethernet driver live in the NET_IONIC namespace. */
MODULE_IMPORT_NS("NET_IONIC");
21 
ionic_query_device(struct ib_device * ibdev,struct ib_device_attr * attr,struct ib_udata * udata)22 static int ionic_query_device(struct ib_device *ibdev,
23 			      struct ib_device_attr *attr,
24 			      struct ib_udata *udata)
25 {
26 	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
27 	struct net_device *ndev;
28 
29 	ndev = ib_device_get_netdev(ibdev, 1);
30 	addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
31 	dev_put(ndev);
32 	attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
33 	attr->page_size_cap = dev->lif_cfg.page_size_supported;
34 
35 	attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
36 	attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;
37 
38 	attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
39 	attr->fw_ver = 0;
40 	attr->max_qp = dev->lif_cfg.qp_count;
41 	attr->max_qp_wr = IONIC_MAX_DEPTH;
42 	attr->device_cap_flags =
43 		IB_DEVICE_MEM_WINDOW |
44 		IB_DEVICE_MEM_MGT_EXTENSIONS |
45 		IB_DEVICE_MEM_WINDOW_TYPE_2B |
46 		0;
47 	attr->max_send_sge =
48 		min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
49 		    IONIC_SPEC_HIGH);
50 	attr->max_recv_sge =
51 		min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
52 		    IONIC_SPEC_HIGH);
53 	attr->max_sge_rd = attr->max_send_sge;
54 	attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
55 	attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
56 	attr->max_mr = dev->lif_cfg.nmrs_per_lif;
57 	attr->max_pd = IONIC_MAX_PD;
58 	attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
59 	attr->max_ee_rd_atom = 0;
60 	attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
61 	attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
62 	attr->max_ee_init_rd_atom = 0;
63 	attr->atomic_cap = IB_ATOMIC_GLOB;
64 	attr->masked_atomic_cap = IB_ATOMIC_GLOB;
65 	attr->max_mw = dev->lif_cfg.nmrs_per_lif;
66 	attr->max_mcast_grp = 0;
67 	attr->max_mcast_qp_attach = 0;
68 	attr->max_ah = dev->lif_cfg.nahs_per_lif;
69 	attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
70 	attr->max_pkeys = IONIC_PKEY_TBL_LEN;
71 
72 	return 0;
73 }
74 
/*
 * ionic_query_port() - Report attributes of the single RoCE port.
 * @ibdev: IB device being queried.
 * @port:  Port number; only port 1 exists.
 * @attr:  Output port attribute structure.
 *
 * Link/phys state is derived from the underlying netdev.  Returns 0 on
 * success, -EINVAL for any port other than 1, -ENODEV if the netdev is
 * gone.
 */
static int ionic_query_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *attr)
{
	struct net_device *ndev;

	if (port != 1)
		return -EINVAL;

	ndev = ib_device_get_netdev(ibdev, port);
	if (!ndev)
		return -ENODEV;

	if (!netif_running(ndev)) {
		/* Interface is administratively down. */
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	} else if (netif_carrier_ok(ndev)) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		/* Up but no carrier: link training / polling. */
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	}

	attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
	attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
	attr->gid_tbl_len = IONIC_GID_TBL_LEN;
	attr->ip_gids = true;
	attr->port_cap_flags = 0;
	attr->max_msg_sz = 0x80000000;
	attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
	attr->max_vl_num = 1;
	attr->subnet_prefix = 0xfe80000000000000ull;

	dev_put(ndev);

	return ib_get_eth_speed(ibdev, port,
				&attr->active_speed,
				&attr->active_width);
}
114 
/* All ionic ports are RoCE; the link layer is always Ethernet. */
static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
						 u32 port)
{
	return IB_LINK_LAYER_ETHERNET;
}
120 
/*
 * ionic_query_pkey() - Return the pkey table entry.
 *
 * The device exposes a single full-membership default pkey at index 0 of
 * port 1; anything else is -EINVAL.
 */
static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey)
{
	if (port != 1 || index != 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}
134 
ionic_modify_device(struct ib_device * ibdev,int mask,struct ib_device_modify * attr)135 static int ionic_modify_device(struct ib_device *ibdev, int mask,
136 			       struct ib_device_modify *attr)
137 {
138 	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
139 
140 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
141 		return -EOPNOTSUPP;
142 
143 	if (mask & IB_DEVICE_MODIFY_NODE_DESC)
144 		memcpy(dev->ibdev.node_desc, attr->node_desc,
145 		       IB_DEVICE_NODE_DESC_MAX);
146 
147 	return 0;
148 }
149 
/*
 * ionic_get_port_immutable() - Report attributes fixed for the port's life.
 *
 * Advertises RoCEv2 (UDP encap) on port 1; other ports are -EINVAL.
 */
static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
				    struct ib_port_immutable *attr)
{
	if (port != 1)
		return -EINVAL;

	attr->gid_tbl_len = IONIC_GID_TBL_LEN;
	attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
	attr->max_mad_size = IB_MGMT_MAD_SIZE;
	attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}
164 
/* Fill @str with the firmware version string reported by the lif. */
static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);

	ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
}
171 
hw_rev_show(struct device * device,struct device_attribute * attr,char * buf)172 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
173 			   char *buf)
174 {
175 	struct ionic_ibdev *dev =
176 		rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
177 
178 	return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
179 }
180 static DEVICE_ATTR_RO(hw_rev);
181 
hca_type_show(struct device * device,struct device_attribute * attr,char * buf)182 static ssize_t hca_type_show(struct device *device,
183 			     struct device_attribute *attr, char *buf)
184 {
185 	struct ionic_ibdev *dev =
186 		rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
187 
188 	return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
189 }
190 static DEVICE_ATTR_RO(hca_type);
191 
/* sysfs attributes exported under the rdma device. */
static struct attribute *ionic_rdma_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};
197 
/* Attribute group registered via ib_device_ops.device_group. */
static const struct attribute_group ionic_rdma_attr_group = {
	.attrs = ionic_rdma_attributes,
};
201 
static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	/*
	 * Dummy define disassociate_ucontext so that it does not
	 * wait for user context before cleaning up hw resources.
	 */
}
209 
/* Verb entry points for the ionic RoCE device. */
static const struct ib_device_ops ionic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_IONIC,
	.uverbs_abi_ver = IONIC_ABI_VERSION,

	/* Resource create/destroy verbs. */
	.alloc_ucontext = ionic_alloc_ucontext,
	.dealloc_ucontext = ionic_dealloc_ucontext,
	.mmap = ionic_mmap,
	.mmap_free = ionic_mmap_free,
	.alloc_pd = ionic_alloc_pd,
	.dealloc_pd = ionic_dealloc_pd,
	.create_ah = ionic_create_ah,
	.query_ah = ionic_query_ah,
	.destroy_ah = ionic_destroy_ah,
	.create_user_ah = ionic_create_ah,
	.get_dma_mr = ionic_get_dma_mr,
	.reg_user_mr = ionic_reg_user_mr,
	.reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
	.dereg_mr = ionic_dereg_mr,
	.alloc_mr = ionic_alloc_mr,
	.map_mr_sg = ionic_map_mr_sg,
	.alloc_mw = ionic_alloc_mw,
	.dealloc_mw = ionic_dealloc_mw,
	.create_cq = ionic_create_cq,
	.destroy_cq = ionic_destroy_cq,
	.create_qp = ionic_create_qp,
	.modify_qp = ionic_modify_qp,
	.query_qp = ionic_query_qp,
	.destroy_qp = ionic_destroy_qp,

	/* Datapath verbs. */
	.post_send = ionic_post_send,
	.post_recv = ionic_post_recv,
	.poll_cq = ionic_poll_cq,
	.req_notify_cq = ionic_req_notify_cq,

	/* Query/management verbs (defined in this file). */
	.query_device = ionic_query_device,
	.query_port = ionic_query_port,
	.get_link_layer = ionic_get_link_layer,
	.query_pkey = ionic_query_pkey,
	.modify_device = ionic_modify_device,
	.get_port_immutable = ionic_get_port_immutable,
	.get_dev_fw_str = ionic_get_dev_fw_str,
	.device_group = &ionic_rdma_attr_group,
	.disassociate_ucontext = ionic_disassociate_ucontext,

	/* Driver-private object sizes for core-allocated objects. */
	INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
	INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
};
262 
/*
 * Initialize the resource-id allocators from the lif's capacity limits.
 * The *_udma_shift values split the cq/qp id spaces evenly across the
 * udma queues (id spaces are power-of-two sized per order_base_2).
 */
static void ionic_init_resids(struct ionic_ibdev *dev)
{
	ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
	dev->half_cqid_udma_shift =
		order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
	ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
	ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
	ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
	/* skip reserved lkey */
	dev->next_mrkey = 1;
	ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
	/* skip reserved SMI and GSI qpids */
	dev->half_qpid_udma_shift =
		order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
	ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
}
279 
/* Tear down every allocator created by ionic_init_resids(). */
static void ionic_destroy_resids(struct ionic_ibdev *dev)
{
	ionic_resid_destroy(&dev->inuse_cqid);
	ionic_resid_destroy(&dev->inuse_pdid);
	ionic_resid_destroy(&dev->inuse_ahid);
	ionic_resid_destroy(&dev->inuse_mrid);
	ionic_resid_destroy(&dev->inuse_qpid);
	ionic_resid_destroy(&dev->inuse_dbid);
}
289 
/*
 * Full teardown of an ibdev created by ionic_create_ibdev().  Order
 * matters: kill the admin queue first so no new commands are issued,
 * unregister from the IB core (drains users), then free everything.
 * By then the qp/cq tables must be empty; warn if not.
 */
static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
{
	ionic_kill_rdma_admin(dev, false);
	ib_unregister_device(&dev->ibdev);
	ionic_stats_cleanup(dev);
	ionic_destroy_rdma_admin(dev);
	ionic_destroy_resids(dev);
	WARN_ON(!xa_empty(&dev->qp_tbl));
	xa_destroy(&dev->qp_tbl);
	WARN_ON(!xa_empty(&dev->cq_tbl));
	xa_destroy(&dev->cq_tbl);
	ib_dealloc_device(&dev->ibdev);
}
303 
/*
 * Allocate, initialize, and register an ionic ibdev for an auxiliary
 * device.  On success returns the new ibdev; on failure unwinds in
 * reverse order via the goto ladder and returns an ERR_PTR.
 */
static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
{
	struct ib_device *ibdev;
	struct ionic_ibdev *dev;
	struct net_device *ndev;
	int rc;

	dev = ib_alloc_device(ionic_ibdev, ibdev);
	if (!dev)
		return ERR_PTR(-EINVAL);

	/* Snapshot the lif capabilities this driver relies on. */
	ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);

	/* GFP_ATOMIC: entries are inserted from atomic context. */
	xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
	xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);

	ionic_init_resids(dev);

	/* Reset device rdma state before building the admin queues. */
	rc = ionic_rdma_reset_devcmd(dev);
	if (rc)
		goto err_reset;

	rc = ionic_create_rdma_admin(dev);
	if (rc)
		goto err_admin;

	ibdev = &dev->ibdev;
	ibdev->dev.parent = dev->lif_cfg.hwdev;

	strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
	strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);

	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = 1;

	/* the first two eq are reserved for async events */
	ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;

	/* Derive the node GUID from the netdev's MAC (EUI-48 -> EUI-64). */
	ndev = ionic_lif_netdev(ionic_adev->lif);
	addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
	rc = ib_device_set_netdev(ibdev, ndev, 1);
	/* ionic_lif_netdev() returns ndev with refcount held */
	dev_put(ndev);
	if (rc)
		goto err_admin;

	ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);

	ionic_stats_init(dev);

	rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
	if (rc)
		goto err_register;

	return dev;

err_register:
	ionic_stats_cleanup(dev);
err_admin:
	ionic_kill_rdma_admin(dev, false);
	ionic_destroy_rdma_admin(dev);
err_reset:
	ionic_destroy_resids(dev);
	xa_destroy(&dev->qp_tbl);
	xa_destroy(&dev->cq_tbl);
	ib_dealloc_device(&dev->ibdev);

	return ERR_PTR(rc);
}
373 
ionic_aux_probe(struct auxiliary_device * adev,const struct auxiliary_device_id * id)374 static int ionic_aux_probe(struct auxiliary_device *adev,
375 			   const struct auxiliary_device_id *id)
376 {
377 	struct ionic_aux_dev *ionic_adev;
378 	struct ionic_ibdev *dev;
379 
380 	ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
381 	dev = ionic_create_ibdev(ionic_adev);
382 	if (IS_ERR(dev))
383 		return dev_err_probe(&adev->dev, PTR_ERR(dev),
384 				     "Failed to register ibdev\n");
385 
386 	auxiliary_set_drvdata(adev, dev);
387 	ibdev_dbg(&dev->ibdev, "registered\n");
388 
389 	return 0;
390 }
391 
ionic_aux_remove(struct auxiliary_device * adev)392 static void ionic_aux_remove(struct auxiliary_device *adev)
393 {
394 	struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);
395 
396 	dev_dbg(&adev->dev, "unregister ibdev\n");
397 	ionic_destroy_ibdev(dev);
398 	dev_dbg(&adev->dev, "unregistered\n");
399 }
400 
/* Match the "ionic.rdma" aux device created by the ionic Ethernet driver. */
static const struct auxiliary_device_id ionic_aux_id_table[] = {
	{ .name = "ionic.rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);
407 
/* Auxiliary driver binding this module to the ionic lif's rdma aux device. */
static struct auxiliary_driver ionic_aux_r_driver = {
	.name = "rdma",
	.probe = ionic_aux_probe,
	.remove = ionic_aux_remove,
	.id_table = ionic_aux_id_table,
};
414 
ionic_mod_init(void)415 static int __init ionic_mod_init(void)
416 {
417 	int rc;
418 
419 	ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
420 	if (!ionic_evt_workq)
421 		return -ENOMEM;
422 
423 	rc = auxiliary_driver_register(&ionic_aux_r_driver);
424 	if (rc)
425 		goto err_aux;
426 
427 	return 0;
428 
429 err_aux:
430 	destroy_workqueue(ionic_evt_workq);
431 
432 	return rc;
433 }
434 
/* Module exit: unregister the driver, then free the event workqueue. */
static void __exit ionic_mod_exit(void)
{
	auxiliary_driver_unregister(&ionic_aux_r_driver);
	destroy_workqueue(ionic_evt_workq);
}
440 
/* Standard module entry/exit hooks. */
module_init(ionic_mod_init);
module_exit(ionic_mod_exit);
443