xref: /linux/drivers/infiniband/hw/mana/device.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
4  */
5 
6 #include "mana_ib.h"
7 #include <net/mana/mana_auxiliary.h>
8 #include <net/addrconf.h>
9 
10 MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
11 MODULE_LICENSE("GPL");
12 MODULE_IMPORT_NS("NET_MANA");
13 
/*
 * Core IB verb table installed on every mana_ib device (both the RNIC
 * "mana.rdma" and the ethernet "mana.eth" auxiliary devices). Feature-
 * specific verbs (HW stats, device memory) live in separate ops tables
 * below and are layered on conditionally from mana_ib_probe().
 */
static const struct ib_device_ops mana_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MANA,
	.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,

	.add_gid = mana_ib_gd_add_gid,
	.alloc_pd = mana_ib_alloc_pd,
	.alloc_ucontext = mana_ib_alloc_ucontext,
	.create_ah = mana_ib_create_ah,
	.create_cq = mana_ib_create_cq,
	.create_qp = mana_ib_create_qp,
	.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
	.create_wq = mana_ib_create_wq,
	.dealloc_pd = mana_ib_dealloc_pd,
	.dealloc_ucontext = mana_ib_dealloc_ucontext,
	.del_gid = mana_ib_gd_del_gid,
	.dereg_mr = mana_ib_dereg_mr,
	.destroy_ah = mana_ib_destroy_ah,
	.destroy_cq = mana_ib_destroy_cq,
	.destroy_qp = mana_ib_destroy_qp,
	.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
	.destroy_wq = mana_ib_destroy_wq,
	.disassociate_ucontext = mana_ib_disassociate_ucontext,
	.get_dma_mr = mana_ib_get_dma_mr,
	.get_link_layer = mana_ib_get_link_layer,
	.get_port_immutable = mana_ib_get_port_immutable,
	.mmap = mana_ib_mmap,
	.modify_qp = mana_ib_modify_qp,
	.modify_wq = mana_ib_modify_wq,
	.poll_cq = mana_ib_poll_cq,
	.post_recv = mana_ib_post_recv,
	.post_send = mana_ib_post_send,
	.query_device = mana_ib_query_device,
	.query_gid = mana_ib_query_gid,
	.query_pkey = mana_ib_query_pkey,
	.query_port = mana_ib_query_port,
	.reg_user_mr = mana_ib_reg_user_mr,
	.reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
	.req_notify_cq = mana_ib_arm_cq,

	/* Tell the RDMA core how much to allocate for each driver object. */
	INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table,
			   ib_ind_table),
};
62 
/* Per-port HW counter verbs; installed only on RNIC devices (see probe). */
static const struct ib_device_ops mana_ib_stats_ops = {
	.alloc_hw_port_stats = mana_ib_alloc_hw_port_stats,
	.get_hw_stats = mana_ib_get_hw_stats,
};
67 
/*
 * Device-wide HW counters; layered on only when the adapter reports
 * MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT in its capability flags.
 */
static const struct ib_device_ops mana_ib_device_stats_ops = {
	.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
};
71 
/*
 * Device-memory (DM) verbs; non-static because they are referenced from
 * other translation units of this driver (declared in mana_ib.h).
 */
const struct ib_device_ops mana_ib_dev_dm_ops = {
	.alloc_dm = mana_ib_alloc_dm,
	.dealloc_dm = mana_ib_dealloc_dm,
	.reg_dm_mr = mana_ib_reg_dm_mr,
};
77 
78 static int mana_ib_netdev_event(struct notifier_block *this,
79 				unsigned long event, void *ptr)
80 {
81 	struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
82 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
83 	struct gdma_context *gc = dev->gdma_dev->gdma_context;
84 	struct mana_context *mc = gc->mana.driver_data;
85 	struct net_device *ndev;
86 	int i;
87 
88 	/* Only process events from our parent device */
89 	for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
90 		if (event_dev == mc->ports[i]) {
91 			switch (event) {
92 			case NETDEV_CHANGEUPPER:
93 				ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
94 				/*
95 				 * RDMA core will setup GID based on updated netdev.
96 				 * It's not possible to race with the core as rtnl lock is being
97 				 * held.
98 				 */
99 				ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
100 
101 				/* mana_get_primary_netdev() returns ndev with refcount held */
102 				if (ndev)
103 					netdev_put(ndev, &dev->dev_tracker);
104 
105 				return NOTIFY_OK;
106 			default:
107 				return NOTIFY_DONE;
108 			}
109 		}
110 	return NOTIFY_DONE;
111 }
112 
/*
 * Auxiliary-bus probe: allocate and register one IB device for the mana
 * auxiliary device. RNIC devices ("mana.rdma") additionally get adapter
 * caps, EQs, an RNIC adapter object, per-port netdevs/MACs, and a netdev
 * notifier; plain ethernet devices ("mana.eth") only query ETH caps.
 *
 * Error unwind uses a fall-through goto ladder; each label also runs all
 * labels below it, mirroring the acquisition order in reverse. The
 * mana_ib_is_rnic() guards on the labels exist because the ETH path can
 * jump to free_ib_device without having created any RNIC resources.
 *
 * Returns 0 on success or a negative errno.
 */
static int mana_ib_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
	struct gdma_context *gc = madev->mdev->gdma_context;
	struct mana_context *mc = gc->mana.driver_data;
	struct gdma_dev *mdev = madev->mdev;
	struct net_device *ndev;
	struct mana_ib_dev *dev;
	u8 mac_addr[ETH_ALEN];
	int ret, i;

	dev = ib_alloc_device(mana_ib_dev, ib_dev);
	if (!dev)
		return -ENOMEM;

	ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.num_comp_vectors = gc->max_num_queues;
	dev->ib_dev.dev.parent = gc->dev;
	dev->gdma_dev = mdev;
	/* QP lookups can happen from IRQ context; use the IRQ-safe lock. */
	xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);

	if (mana_ib_is_rnic(dev)) {
		/* May be raised to mc->num_ports below if multi-port is supported. */
		dev->ib_dev.phys_port_cnt = 1;
		/* Derive the node GUID from port 0's MAC (EUI-48 -> EUI-64). */
		addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
		ret = mana_ib_gd_query_adapter_caps(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
			goto free_ib_device;
		}

		ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
		if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
			ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
		ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_dm_ops);

		ret = mana_ib_create_eqs(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
			goto free_ib_device;
		}

		ret = mana_ib_gd_create_rnic_adapter(dev);
		if (ret)
			goto destroy_eqs;

		if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
			dev->ib_dev.phys_port_cnt = mc->num_ports;

		/* Bind each IB port (1-based) to its primary netdev and program its MAC. */
		for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
			ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
			if (!ndev) {
				ret = -ENODEV;
				ibdev_err(&dev->ib_dev,
					  "Failed to get netdev for IB port %d", i + 1);
				goto destroy_rnic;
			}
			ether_addr_copy(mac_addr, ndev->dev_addr);
			ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
			/* mana_get_primary_netdev() returns ndev with refcount held */
			netdev_put(ndev, &dev->dev_tracker);
			if (ret) {
				ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
				goto destroy_rnic;
			}
			ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
			if (ret) {
				ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
				goto destroy_rnic;
			}
		}
		/* Track upper-device changes so GIDs follow bonding/teaming setups. */
		dev->nb.notifier_call = mana_ib_netdev_event;
		ret = register_netdevice_notifier(&dev->nb);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
			goto destroy_rnic;
		}
	} else {
		dev->ib_dev.phys_port_cnt = mc->num_ports;
		ret = mana_eth_query_adapter_caps(dev);
		if (ret) {
			ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
			goto free_ib_device;
		}
	}

	/* DMA pool for address handles; fixed-size, self-aligned buffers. */
	dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
				       MANA_AV_BUFFER_SIZE, 0);
	if (!dev->av_pool) {
		ret = -ENOMEM;
		goto deregister_net_notifier;
	}

	ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
		  mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);

	ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
				 gc->dev);
	if (ret)
		goto deallocate_pool;

	dev_set_drvdata(&adev->dev, dev);

	return 0;

deallocate_pool:
	dma_pool_destroy(dev->av_pool);
deregister_net_notifier:
	if (mana_ib_is_rnic(dev))
		unregister_netdevice_notifier(&dev->nb);
destroy_rnic:
	if (mana_ib_is_rnic(dev))
		mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
	if (mana_ib_is_rnic(dev))
		mana_ib_destroy_eqs(dev);
free_ib_device:
	xa_destroy(&dev->qp_table_wq);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}
235 
/*
 * Auxiliary-bus remove: tear down in the reverse order of mana_ib_probe().
 * The netdev notifier, RNIC adapter, and EQs exist only on RNIC devices,
 * hence the mana_ib_is_rnic() guards.
 */
static void mana_ib_remove(struct auxiliary_device *adev)
{
	struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);

	/* Drain GSI send queues first (per helper name) — RNIC only. */
	if (mana_ib_is_rnic(dev))
		mana_drain_gsi_sqs(dev);

	/* Unregister before freeing backing resources so no verbs can race. */
	ib_unregister_device(&dev->ib_dev);
	dma_pool_destroy(dev->av_pool);
	if (mana_ib_is_rnic(dev)) {
		unregister_netdevice_notifier(&dev->nb);
		mana_ib_gd_destroy_rnic_adapter(dev);
		mana_ib_destroy_eqs(dev);
	}
	xa_destroy(&dev->qp_table_wq);
	ib_dealloc_device(&dev->ib_dev);
}
253 
/*
 * Auxiliary device IDs this driver binds to: the RNIC function
 * ("mana.rdma") and the plain ethernet function ("mana.eth").
 */
static const struct auxiliary_device_id mana_id_table[] = {
	{ .name = "mana.rdma", },
	{ .name = "mana.eth", },
	{},	/* sentinel */
};
259 
260 MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
261 
/* Auxiliary-bus driver glue; registered by module_auxiliary_driver(). */
static struct auxiliary_driver mana_driver = {
	.probe = mana_ib_probe,
	.remove = mana_ib_remove,
	.id_table = mana_id_table,
};
267 
268 module_auxiliary_driver(mana_driver);
269