xref: /linux/drivers/net/ethernet/mellanox/mlx4/intf.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <net/devlink.h>

#include "mlx4.h"

static DEFINE_MUTEX(intf_mutex);
static DEFINE_IDA(mlx4_adev_ida);

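/* Return true if at least one port on the device is configured for Ethernet. */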
static bool is_eth_supported(struct mlx4_dev *dev)
{
	for (int port = 1; port <= dev->caps.num_ports; port++)
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			return true;

	return false;
}

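/* Return true if at least one port runs InfiniBand, or the device supports
 * RoCE (IBoE).
 */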
static bool is_ib_supported(struct mlx4_dev *dev)
{
	for (int port = 1; port <= dev->caps.num_ports; port++)
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			return true;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
		return true;

	return false;
}

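/* Auxiliary devices that may be spawned on top of the mlx4 core device.
 * An entry is instantiated only when its is_supported() callback reports that
 * the corresponding protocol is available on this HCA.
 */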
static const struct mlx4_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx4_dev *dev);
} mlx4_adev_devices[] = {
	{ "eth", is_eth_supported },
	{ "ib", is_ib_supported },
};

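/* Allocate the per-device auxiliary bus id and the array that tracks the
 * auxiliary devices created for this mlx4 instance.
 */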
int mlx4_adev_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->adev_idx = ida_alloc(&mlx4_adev_ida, GFP_KERNEL);
	if (priv->adev_idx < 0)
		return priv->adev_idx;

	priv->adev = kcalloc(ARRAY_SIZE(mlx4_adev_devices),
			     sizeof(struct mlx4_adev *), GFP_KERNEL);
	if (!priv->adev) {
		ida_free(&mlx4_adev_ida, priv->adev_idx);
		return -ENOMEM;
	}

	return 0;
}

void mlx4_adev_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	kfree(priv->adev);
	ida_free(&mlx4_adev_ida, priv->adev_idx);
}

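/* Release callback for the auxiliary device, invoked once the last reference
 * is dropped: free the wrapper and clear the owner's slot.
 */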
static void adev_release(struct device *dev)
{
	struct mlx4_adev *mlx4_adev =
		container_of(dev, struct mlx4_adev, adev.dev);
	struct mlx4_priv *priv = mlx4_priv(mlx4_adev->mdev);
	int idx = mlx4_adev->idx;

	kfree(mlx4_adev);
	priv->adev[idx] = NULL;
}

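/* Allocate, initialize and register the auxiliary device described by
 * mlx4_adev_devices[idx] under the parent PCI device.
 */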
static struct mlx4_adev *add_adev(struct mlx4_dev *dev, int idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	const char *suffix = mlx4_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx4_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = priv->adev_idx;
	adev->name = suffix;
	adev->dev.parent = &dev->persist->pdev->dev;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

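/* Tear down an auxiliary device previously created by add_adev(). The wrapper
 * itself is freed from adev_release() when the last reference goes away.
 */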
static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv)
{
	return auxiliary_driver_register(&madrv->adrv);
}
EXPORT_SYMBOL_GPL(mlx4_register_auxiliary_driver);

void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv)
{
	auxiliary_driver_unregister(&madrv->adrv);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_auxiliary_driver);

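/* Switch port bonding (HA mode) on or off. The hardware port remap state is
 * updated first; afterwards every bound auxiliary driver that advertises
 * MLX4_INTFF_BONDING is restarted so it picks up the new bonding state.
 */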
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, ret;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Failed to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Failed to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	mutex_lock(&intf_mutex);

	for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
		struct mlx4_adev *madev = priv->adev[i];
		struct mlx4_adrv *madrv;
		enum mlx4_protocol protocol;

		if (!madev)
			continue;

		device_lock(&madev->adev.dev);
		if (!madev->adev.dev.driver) {
			device_unlock(&madev->adev.dev);
			continue;
		}

		madrv = container_of(madev->adev.dev.driver, struct mlx4_adrv,
				     adrv.driver);
		if (!(madrv->flags & MLX4_INTFF_BONDING)) {
			device_unlock(&madev->adev.dev);
			continue;
		}

		if (mlx4_is_mfunc(dev)) {
			mlx4_dbg(dev,
				 "SRIOV, disabled HA mode for intf proto %d\n",
				 madrv->protocol);
			device_unlock(&madev->adev.dev);
			continue;
		}

		protocol = madrv->protocol;
		device_unlock(&madev->adev.dev);

		del_adev(&madev->adev);
		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
				  mlx4_adev_devices[i].suffix);
			priv->adev[i] = NULL;
			continue;
		}

		mlx4_dbg(dev,
			 "Interface for protocol %d restarted with bonded mode %s\n",
			 protocol, enable ? "enabled" : "disabled");
	}

	mutex_unlock(&intf_mutex);

	return 0;
}

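/* Broadcast a device event (port state change, catastrophic error, ...) to all
 * consumers that registered a notifier block on this device.
 */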
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 void *param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	atomic_notifier_call_chain(&priv->event_nh, type, param);
}

int mlx4_register_event_notifier(struct mlx4_dev *dev,
				 struct notifier_block *nb)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return atomic_notifier_chain_register(&priv->event_nh, nb);
}
EXPORT_SYMBOL(mlx4_register_event_notifier);

int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
				   struct notifier_block *nb)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return atomic_notifier_chain_unregister(&priv->event_nh, nb);
}
EXPORT_SYMBOL(mlx4_unregister_event_notifier);

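/* Instantiate every supported auxiliary device that is not already present.
 * Failures are reported to the caller but do not stop the scan.
 */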
static int add_drivers(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx4_adev_devices[i].is_supported)
			is_supported = mlx4_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
				  mlx4_adev_devices[i].suffix);
			/* Keep scanning the remaining entries and leave it to
			 * the caller to decide whether to release everything
			 * or continue. */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

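/* Remove auxiliary devices whose protocol is no longer supported, or all of
 * them when the interface is going down.
 */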
static void delete_drivers(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	bool delete_all;
	int i;

	delete_all = !(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP);

	for (i = ARRAY_SIZE(mlx4_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx4_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx4_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* Called after the mlx4_dev has been reconfigured: drop auxiliary devices that
 * are no longer supported and, if the interface is still up, add the ones that
 * became supported. Must be called with intf_mutex held.
 */
static int rescan_drivers_locked(struct mlx4_dev *dev)
{
	lockdep_assert_held(&intf_mutex);

	delete_drivers(dev);
	if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
		return 0;

	return add_drivers(dev);
}

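/* Bring the device online: mark the interface up, spawn the supported
 * auxiliary devices and start catastrophic-error polling.
 */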
int mlx4_register_device(struct mlx4_dev *dev)
{
	int ret;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;

	ret = rescan_drivers_locked(dev);

	mutex_unlock(&intf_mutex);

	if (ret) {
		mlx4_unregister_device(dev);
		return ret;
	}

	mlx4_start_catas_poll(dev);

	return ret;
}

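/* Take the device offline: stop catastrophic-error polling, enter the error
 * state if a VF is being removed while the comm channel is down, and remove
 * all auxiliary devices.
 */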
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
		return;

	mlx4_stop_catas_poll(dev);
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
	    mlx4_is_slave(dev)) {
		/* In mlx4_remove_one on a VF */
		u32 slave_read =
			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));

		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
				 __func__);
			mlx4_enter_error_state(dev->persist);
		}
	}
	mutex_lock(&intf_mutex);

	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	rescan_drivers_locked(dev);

	mutex_unlock(&intf_mutex);
}

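/* Return the devlink port associated with the given physical port number. */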
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];

	return &info->devlink_port;
}
EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);