xref: /linux/drivers/infiniband/hw/mlx5/main.c (revision c0368933dd3d4a8210a07a0c95c471421fbf7523)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4  * Copyright (c) 2020, Intel Corporation. All rights reserved.
5  */
6 
7 #include <linux/debugfs.h>
8 #include <linux/highmem.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/pci.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/slab.h>
15 #include <linux/bitmap.h>
16 #include <linux/log2.h>
17 #include <linux/sched.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task.h>
20 #include <linux/delay.h>
21 #include <rdma/ib_user_verbs.h>
22 #include <rdma/ib_addr.h>
23 #include <rdma/ib_cache.h>
24 #include <linux/mlx5/port.h>
25 #include <linux/mlx5/vport.h>
26 #include <linux/mlx5/fs.h>
27 #include <linux/mlx5/eswitch.h>
28 #include <linux/mlx5/driver.h>
29 #include <linux/mlx5/lag.h>
30 #include <linux/list.h>
31 #include <rdma/ib_smi.h>
32 #include <rdma/ib_umem_odp.h>
33 #include <rdma/lag.h>
34 #include <linux/in.h>
35 #include <linux/etherdevice.h>
36 #include "mlx5_ib.h"
37 #include "ib_rep.h"
38 #include "cmd.h"
39 #include "devx.h"
40 #include "dm.h"
41 #include "fs.h"
42 #include "srq.h"
43 #include "qp.h"
44 #include "wr.h"
45 #include "restrack.h"
46 #include "counters.h"
47 #include "umr.h"
48 #include <rdma/uverbs_std_types.h>
49 #include <rdma/uverbs_ioctl.h>
50 #include <rdma/mlx5_user_ioctl_verbs.h>
51 #include <rdma/mlx5_user_ioctl_cmds.h>
52 #include <rdma/ib_ucaps.h>
53 #include "macsec.h"
54 #include "data_direct.h"
55 #include "dmah.h"
56 
57 #define UVERBS_MODULE_NAME mlx5_ib
58 #include <rdma/uverbs_named_ioctl.h>
59 
60 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
61 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
62 MODULE_LICENSE("Dual BSD/GPL");
63 
64 struct mlx5_ib_event_work {
65 	struct work_struct	work;
66 	union {
67 		struct mlx5_ib_dev	      *dev;
68 		struct mlx5_ib_multiport_info *mpi;
69 	};
70 	bool			is_slave;
71 	unsigned int		event;
72 	void			*param;
73 };
74 
75 enum {
76 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
77 };
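
/*
 * A hedged reading of the encoding, inferred from the check in
 * get_atomic_caps() below: atomic_size_qp is a bitmask in which bit n
 * set means 2^n-byte operands are supported, so bit 3 (1 << 3) is the
 * 8-byte size used by standard compare-swap/fetch-add, e.g.:
 *
 *	bool has_8b = atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES;
 */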
78 
79 static struct workqueue_struct *mlx5_ib_event_wq;
80 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
81 static LIST_HEAD(mlx5_ib_dev_list);
82 /*
83  * This mutex should be held when accessing either of the above lists
84  */
85 static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
86 
87 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
88 {
89 	struct mlx5_ib_dev *dev;
90 
91 	mutex_lock(&mlx5_ib_multiport_mutex);
92 	dev = mpi->ibdev;
93 	mutex_unlock(&mlx5_ib_multiport_mutex);
94 	return dev;
95 }
96 
97 static enum rdma_link_layer
98 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
99 {
100 	switch (port_type_cap) {
101 	case MLX5_CAP_PORT_TYPE_IB:
102 		return IB_LINK_LAYER_INFINIBAND;
103 	case MLX5_CAP_PORT_TYPE_ETH:
104 		return IB_LINK_LAYER_ETHERNET;
105 	default:
106 		return IB_LINK_LAYER_UNSPECIFIED;
107 	}
108 }
109 
110 static enum rdma_link_layer
111 mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
112 {
113 	struct mlx5_ib_dev *dev = to_mdev(device);
114 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
115 
116 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
117 }
118 
119 static int get_port_state(struct ib_device *ibdev,
120 			  u32 port_num,
121 			  enum ib_port_state *state)
122 {
123 	struct ib_port_attr attr;
124 	int ret;
125 
126 	memset(&attr, 0, sizeof(attr));
127 	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
128 	if (!ret)
129 		*state = attr.state;
130 	return ret;
131 }
132 
133 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
134 					   struct net_device *ndev,
135 					   struct net_device *upper,
136 					   u32 *port_num)
137 {
138 	struct net_device *rep_ndev;
139 	struct mlx5_ib_port *port;
140 	int i;
141 
142 	for (i = 0; i < dev->num_ports; i++) {
143 		port  = &dev->port[i];
144 		if (!port->rep)
145 			continue;
146 
147 		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
148 			*port_num = i + 1;
149 			return &port->roce;
150 		}
151 
152 		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
153 			continue;
154 		rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);
155 		if (rep_ndev && rep_ndev == ndev) {
156 			dev_put(rep_ndev);
157 			*port_num = i + 1;
158 			return &port->roce;
159 		}
160 
161 		dev_put(rep_ndev);
162 	}
163 
164 	return NULL;
165 }
166 
167 static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev,
168 				   struct net_device *ndev,
169 				   struct net_device *upper,
170 				   struct net_device *ib_ndev)
171 {
172 	if (!dev->ib_active)
173 		return false;
174 
175 	/* Event is about our upper device */
176 	if (upper == ndev)
177 		return true;
178 
179 	/* RDMA device is not in LAG and not in switchdev */
180 	if (!dev->is_rep && !upper && ndev == ib_ndev)
181 		return true;
182 
183 	/* RDMA device is in switchdev */
184 	if (dev->is_rep && ndev == ib_ndev)
185 		return true;
186 
187 	return false;
188 }
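
/*
 * Condensed decision table for the checks above (informational only):
 *
 *	ndev is the LAG upper device         -> deliver event
 *	non-rep, no upper, ndev == ib_ndev   -> deliver event
 *	rep, ndev == ib_ndev                 -> deliver event
 *	anything else (or ib_dev not active) -> drop
 */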
189 
190 static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
191 {
192 	struct mlx5_ib_port *port;
193 	int i;
194 
195 	for (i = 0; i < ibdev->num_ports; i++) {
196 		port = &ibdev->port[i];
197 		if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK)
198 			return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
200 	}
201 
202 	return NULL;
203 }
204 
205 static int mlx5_netdev_event(struct notifier_block *this,
206 			     unsigned long event, void *ptr)
207 {
208 	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
209 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
210 	u32 port_num = roce->native_port_num;
211 	struct net_device *ib_ndev = NULL;
212 	struct mlx5_core_dev *mdev;
213 	struct mlx5_ib_dev *ibdev;
214 
215 	ibdev = roce->dev;
216 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
217 	if (!mdev)
218 		return NOTIFY_DONE;
219 
220 	switch (event) {
221 	case NETDEV_REGISTER:
222 		/* Should already be registered during the load */
223 		if (ibdev->is_rep)
224 			break;
225 
226 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
227 		/* Exit if already registered */
228 		if (ib_ndev)
229 			goto put_ndev;
230 
231 		if (ndev->dev.parent == mdev->device)
232 			ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
233 		break;
234 
235 	case NETDEV_UNREGISTER:
236 		/* In case of reps, ib device goes away before the netdevs */
237 		if (ibdev->is_rep)
238 			break;
239 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
240 		if (ib_ndev == ndev)
241 			ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
242 		goto put_ndev;
243 
244 	case NETDEV_CHANGE:
245 	case NETDEV_UP:
246 	case NETDEV_DOWN: {
247 		struct net_device *upper = NULL;
248 
249 		if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
250 		    !mlx5_core_mp_enabled(mdev))
251 			return NOTIFY_DONE;
252 
253 		if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
254 			struct net_device *lag_ndev;
255 
256 			if (mlx5_lag_is_roce(mdev))
257 				lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);
258 			else /* sriov lag */
259 				lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev);
260 
261 			if (lag_ndev) {
262 				upper = netdev_master_upper_dev_get(lag_ndev);
263 				dev_put(lag_ndev);
264 			} else {
265 				goto done;
266 			}
267 		}
268 
269 		if (ibdev->is_rep)
270 			roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
271 		if (!roce)
272 			return NOTIFY_DONE;
273 
274 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
275 
276 		if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) {
277 			struct ib_event ibev = { };
278 			enum ib_port_state port_state;
279 
280 			if (get_port_state(&ibdev->ib_dev, port_num,
281 					   &port_state))
282 				goto put_ndev;
283 
284 			if (roce->last_port_state == port_state)
285 				goto put_ndev;
286 
287 			roce->last_port_state = port_state;
288 			ibev.device = &ibdev->ib_dev;
289 			if (port_state == IB_PORT_DOWN)
290 				ibev.event = IB_EVENT_PORT_ERR;
291 			else if (port_state == IB_PORT_ACTIVE)
292 				ibev.event = IB_EVENT_PORT_ACTIVE;
293 			else
294 				goto put_ndev;
295 
296 			ibev.element.port_num = port_num;
297 			ib_dispatch_event(&ibev);
298 		}
299 		break;
300 	}
301 
302 	default:
303 		break;
304 	}
305 put_ndev:
306 	dev_put(ib_ndev);
307 done:
308 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
309 	return NOTIFY_DONE;
310 }
311 
312 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
313 						   u32 ib_port_num,
314 						   u32 *native_port_num)
315 {
316 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
317 							  ib_port_num);
318 	struct mlx5_core_dev *mdev = NULL;
319 	struct mlx5_ib_multiport_info *mpi;
320 	struct mlx5_ib_port *port;
321 
322 	if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
323 		if (native_port_num)
324 			*native_port_num = smi_to_native_portnum(ibdev,
325 								 ib_port_num);
326 		return ibdev->mdev;
328 	}
329 
330 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
331 	    ll != IB_LINK_LAYER_ETHERNET) {
332 		if (native_port_num)
333 			*native_port_num = ib_port_num;
334 		return ibdev->mdev;
335 	}
336 
337 	if (native_port_num)
338 		*native_port_num = 1;
339 
340 	port = &ibdev->port[ib_port_num - 1];
341 	spin_lock(&port->mp.mpi_lock);
342 	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
343 	if (mpi && !mpi->unaffiliate) {
344 		mdev = mpi->mdev;
345 		/* If it's the master, no need to refcount; it'll exist
346 		 * as long as the ib_dev exists.
347 		 */
348 		if (!mpi->is_master)
349 			mpi->mdev_refcnt++;
350 	}
351 	spin_unlock(&port->mp.mpi_lock);
352 
353 	return mdev;
354 }
355 
356 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
357 {
358 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
359 							  port_num);
360 	struct mlx5_ib_multiport_info *mpi;
361 	struct mlx5_ib_port *port;
362 
363 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
364 		return;
365 
366 	port = &ibdev->port[port_num - 1];
367 
368 	spin_lock(&port->mp.mpi_lock);
369 	mpi = ibdev->port[port_num - 1].mp.mpi;
370 	if (mpi->is_master)
371 		goto out;
372 
373 	mpi->mdev_refcnt--;
374 	if (mpi->unaffiliate)
375 		complete(&mpi->unref_comp);
376 out:
377 	spin_unlock(&port->mp.mpi_lock);
378 }
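
/*
 * Typical get/put pairing, sketched for illustration only (see e.g.
 * mlx5_query_port_roce() below for a real caller):
 *
 *	u32 mdev_port_num;
 *	struct mlx5_core_dev *mdev;
 *
 *	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num,
 *					    &mdev_port_num);
 *	if (!mdev)
 *		return -ENODEV;
 *	...issue commands against mdev at mdev_port_num...
 *	mlx5_ib_put_native_port_mdev(ibdev, port_num);
 */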
379 
380 static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
381 					   u16 *active_speed, u8 *active_width)
382 {
383 	switch (eth_proto_oper) {
384 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
385 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
386 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
387 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
388 		*active_width = IB_WIDTH_1X;
389 		*active_speed = IB_SPEED_SDR;
390 		break;
391 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
392 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
393 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
394 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
395 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
396 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
397 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
398 		*active_width = IB_WIDTH_1X;
399 		*active_speed = IB_SPEED_QDR;
400 		break;
401 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
402 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
403 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
404 		*active_width = IB_WIDTH_1X;
405 		*active_speed = IB_SPEED_EDR;
406 		break;
407 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
408 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
409 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
410 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
411 		*active_width = IB_WIDTH_4X;
412 		*active_speed = IB_SPEED_QDR;
413 		break;
414 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
415 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
416 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
417 		*active_width = IB_WIDTH_1X;
418 		*active_speed = IB_SPEED_HDR;
419 		break;
420 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
421 		*active_width = IB_WIDTH_4X;
422 		*active_speed = IB_SPEED_FDR;
423 		break;
424 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
425 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
426 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
427 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
428 		*active_width = IB_WIDTH_4X;
429 		*active_speed = IB_SPEED_EDR;
430 		break;
431 	default:
432 		return -EINVAL;
433 	}
434 
435 	return 0;
436 }
437 
438 static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
439 					u8 *active_width)
440 {
441 	switch (eth_proto_oper) {
442 	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
443 	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
444 		*active_width = IB_WIDTH_1X;
445 		*active_speed = IB_SPEED_SDR;
446 		break;
447 	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
448 		*active_width = IB_WIDTH_1X;
449 		*active_speed = IB_SPEED_DDR;
450 		break;
451 	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
452 		*active_width = IB_WIDTH_1X;
453 		*active_speed = IB_SPEED_QDR;
454 		break;
455 	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
456 		*active_width = IB_WIDTH_4X;
457 		*active_speed = IB_SPEED_QDR;
458 		break;
459 	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
460 		*active_width = IB_WIDTH_1X;
461 		*active_speed = IB_SPEED_EDR;
462 		break;
463 	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
464 		*active_width = IB_WIDTH_2X;
465 		*active_speed = IB_SPEED_EDR;
466 		break;
467 	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
468 		*active_width = IB_WIDTH_1X;
469 		*active_speed = IB_SPEED_HDR;
470 		break;
471 	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
472 		*active_width = IB_WIDTH_4X;
473 		*active_speed = IB_SPEED_EDR;
474 		break;
475 	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
476 		*active_width = IB_WIDTH_2X;
477 		*active_speed = IB_SPEED_HDR;
478 		break;
479 	case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
480 		*active_width = IB_WIDTH_1X;
481 		*active_speed = IB_SPEED_NDR;
482 		break;
483 	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
484 		*active_width = IB_WIDTH_4X;
485 		*active_speed = IB_SPEED_HDR;
486 		break;
487 	case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
488 		*active_width = IB_WIDTH_2X;
489 		*active_speed = IB_SPEED_NDR;
490 		break;
491 	case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
492 		*active_width = IB_WIDTH_1X;
493 		*active_speed = IB_SPEED_XDR;
494 		break;
495 	case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
496 		*active_width = IB_WIDTH_8X;
497 		*active_speed = IB_SPEED_HDR;
498 		break;
499 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
500 		*active_width = IB_WIDTH_4X;
501 		*active_speed = IB_SPEED_NDR;
502 		break;
503 	case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
504 		*active_width = IB_WIDTH_2X;
505 		*active_speed = IB_SPEED_XDR;
506 		break;
507 	case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
508 		*active_width = IB_WIDTH_8X;
509 		*active_speed = IB_SPEED_NDR;
510 		break;
511 	case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
512 		*active_width = IB_WIDTH_4X;
513 		*active_speed = IB_SPEED_XDR;
514 		break;
515 	case MLX5E_PROT_MASK(MLX5E_1600GAUI_8_1600GBASE_CR8_KR8):
516 		*active_width = IB_WIDTH_8X;
517 		*active_speed = IB_SPEED_XDR;
518 		break;
519 	default:
520 		return -EINVAL;
521 	}
522 
523 	return 0;
524 }
525 
526 static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
527 				    u8 *active_width, bool ext)
528 {
529 	return ext ?
530 		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
531 					     active_width) :
532 		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
533 						active_width);
534 }
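
/*
 * The (width, speed) pair mirrors the Ethernet lane layout; two worked
 * examples derived from the tables above:
 *
 *	100GAUI_2 / 100GBASE-CR2 -> IB_WIDTH_2X, IB_SPEED_HDR (2 x ~50G)
 *	400GAUI_4 / 400GBASE-CR4 -> IB_WIDTH_4X, IB_SPEED_NDR (4 x ~100G)
 */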
535 
536 static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
537 				struct ib_port_attr *props)
538 {
539 	struct mlx5_ib_dev *dev = to_mdev(device);
540 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
541 	struct mlx5_core_dev *mdev;
542 	struct net_device *ndev, *upper;
543 	enum ib_mtu ndev_ib_mtu;
544 	bool put_mdev = true;
545 	u32 eth_prot_oper;
546 	u32 mdev_port_num;
547 	bool ext;
548 	int err;
549 
550 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
551 	if (!mdev) {
552 		/* This means the port isn't affiliated yet. Get the
553 		 * info for the master port instead.
554 		 */
555 		put_mdev = false;
556 		mdev = dev->mdev;
557 		mdev_port_num = 1;
558 		port_num = 1;
559 	}
560 
561 	/* Possible bad flows are checked before filling out props, so in
562 	 * case of an error it is still zeroed out.
563 	 * Use the native port in case of reps.
564 	 */
565 	if (dev->is_rep) {
566 		struct mlx5_eswitch_rep *rep;
567 
568 		rep = dev->port[port_num - 1].rep;
569 		if (rep) {
570 			mdev = mlx5_eswitch_get_core_dev(rep->esw);
571 			WARN_ON(!mdev);
572 		}
573 		mdev_port_num = 1;
574 	}
575 
576 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
577 				   mdev_port_num, 0);
578 
579 	if (err)
580 		goto out;
581 	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
582 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
583 
584 	props->active_width     = IB_WIDTH_4X;
585 	props->active_speed     = IB_SPEED_QDR;
586 
587 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
588 				 &props->active_width, ext);
589 
590 	if (!dev->is_rep && dev->mdev->roce.roce_en) {
591 		u16 qkey_viol_cntr;
592 
593 		props->port_cap_flags |= IB_PORT_CM_SUP;
594 		props->ip_gids = true;
595 		props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
596 						   roce_address_table_size);
597 		mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
598 		props->qkey_viol_cntr = qkey_viol_cntr;
599 	}
600 	props->max_mtu          = IB_MTU_4096;
601 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
602 	props->pkey_tbl_len     = 1;
603 	props->state            = IB_PORT_DOWN;
604 	props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;
605 
606 	/* If this is a stub query for an unaffiliated port, stop here */
607 	if (!put_mdev)
608 		goto out;
609 
610 	ndev = ib_device_get_netdev(device, port_num);
611 	if (!ndev)
612 		goto out;
613 
614 	if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
615 		rcu_read_lock();
616 		upper = netdev_master_upper_dev_get_rcu(ndev);
617 		if (upper) {
618 			dev_put(ndev);
619 			ndev = upper;
620 			dev_hold(ndev);
621 		}
622 		rcu_read_unlock();
623 	}
624 
625 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
626 		props->state      = IB_PORT_ACTIVE;
627 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
628 	}
629 
630 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
631 
632 	dev_put(ndev);
633 
634 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
635 out:
636 	if (put_mdev)
637 		mlx5_ib_put_native_port_mdev(dev, port_num);
638 	return err;
639 }
640 
641 int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
642 		  unsigned int index, const union ib_gid *gid,
643 		  const struct ib_gid_attr *attr)
644 {
645 	enum ib_gid_type gid_type;
646 	u16 vlan_id = 0xffff;
647 	u8 roce_version = 0;
648 	u8 roce_l3_type = 0;
649 	u8 mac[ETH_ALEN];
650 	int ret;
651 
652 	gid_type = attr->gid_type;
653 	if (gid) {
654 		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
655 		if (ret)
656 			return ret;
657 	}
658 
659 	switch (gid_type) {
660 	case IB_GID_TYPE_ROCE:
661 		roce_version = MLX5_ROCE_VERSION_1;
662 		break;
663 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
664 		roce_version = MLX5_ROCE_VERSION_2;
665 		if (gid && ipv6_addr_v4mapped((void *)gid))
666 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
667 		else
668 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
669 		break;
670 
671 	default:
672 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
673 	}
674 
675 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
676 				      roce_l3_type, gid->raw, mac,
677 				      vlan_id < VLAN_CFI_MASK, vlan_id,
678 				      port_num);
679 }
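
/*
 * Worked example: a RoCEv2 GID of ::ffff:192.0.2.1 (IPv4-mapped IPv6)
 * takes the IB_GID_TYPE_ROCE_UDP_ENCAP branch above and is programmed
 * as MLX5_ROCE_VERSION_2 with MLX5_ROCE_L3_TYPE_IPV4, while a plain
 * IPv6 GID of the same type gets MLX5_ROCE_L3_TYPE_IPV6.
 */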
680 
681 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
682 			   __always_unused void **context)
683 {
684 	int ret;
685 
686 	ret = mlx5r_add_gid_macsec_operations(attr);
687 	if (ret)
688 		return ret;
689 
690 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
691 			     attr->index, &attr->gid, attr);
692 }
693 
694 static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
695 			   __always_unused void **context)
696 {
697 	int ret;
698 
699 	ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
700 			    attr->index, NULL, attr);
701 	if (ret)
702 		return ret;
703 
704 	mlx5r_del_gid_macsec_operations(attr);
705 	return 0;
706 }
707 
708 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
709 				   const struct ib_gid_attr *attr)
710 {
711 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
712 		return 0;
713 
714 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
715 }
716 
717 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
718 {
719 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
720 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
721 	return 0;
722 }
723 
724 enum {
725 	MLX5_VPORT_ACCESS_METHOD_MAD,
726 	MLX5_VPORT_ACCESS_METHOD_HCA,
727 	MLX5_VPORT_ACCESS_METHOD_NIC,
728 };
729 
730 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
731 {
732 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
733 		return MLX5_VPORT_ACCESS_METHOD_MAD;
734 
735 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
736 	    IB_LINK_LAYER_ETHERNET)
737 		return MLX5_VPORT_ACCESS_METHOD_NIC;
738 
739 	return MLX5_VPORT_ACCESS_METHOD_HCA;
740 }
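
/*
 * In other words: an IB link layer without ib_virt uses the MAD
 * interface, an Ethernet link layer uses NIC vport commands, and the
 * remaining case (IB with virtualization) uses HCA vport commands.
 */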
741 
742 static void get_atomic_caps(struct mlx5_ib_dev *dev,
743 			    u8 atomic_size_qp,
744 			    struct ib_device_attr *props)
745 {
746 	u8 tmp;
747 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
748 	u8 atomic_req_8B_endianness_mode =
749 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
750 
751 	/* Check if HW supports standard 8-byte atomic operations and is
752 	 * capable of responding in host endianness.
753 	 */
754 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
755 	if (((atomic_operations & tmp) == tmp) &&
756 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
757 	    (atomic_req_8B_endianness_mode)) {
758 		props->atomic_cap = IB_ATOMIC_HCA;
759 	} else {
760 		props->atomic_cap = IB_ATOMIC_NONE;
761 	}
762 }
763 
764 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
765 			       struct ib_device_attr *props)
766 {
767 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
768 
769 	get_atomic_caps(dev, atomic_size_qp, props);
770 }
771 
772 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
773 					__be64 *sys_image_guid)
774 {
775 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
776 	struct mlx5_core_dev *mdev = dev->mdev;
777 	u64 tmp;
778 	int err;
779 
780 	switch (mlx5_get_vport_access_method(ibdev)) {
781 	case MLX5_VPORT_ACCESS_METHOD_MAD:
782 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
783 							    sys_image_guid);
784 
785 	case MLX5_VPORT_ACCESS_METHOD_HCA:
786 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
787 		break;
788 
789 	case MLX5_VPORT_ACCESS_METHOD_NIC:
790 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
791 		break;
792 
793 	default:
794 		return -EINVAL;
795 	}
796 
797 	if (!err)
798 		*sys_image_guid = cpu_to_be64(tmp);
799 
800 	return err;
802 }
803 
804 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
805 				u16 *max_pkeys)
806 {
807 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
808 	struct mlx5_core_dev *mdev = dev->mdev;
809 
810 	switch (mlx5_get_vport_access_method(ibdev)) {
811 	case MLX5_VPORT_ACCESS_METHOD_MAD:
812 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
813 
814 	case MLX5_VPORT_ACCESS_METHOD_HCA:
815 	case MLX5_VPORT_ACCESS_METHOD_NIC:
816 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
817 						pkey_table_size));
818 		return 0;
819 
820 	default:
821 		return -EINVAL;
822 	}
823 }
824 
825 static int mlx5_query_vendor_id(struct ib_device *ibdev,
826 				u32 *vendor_id)
827 {
828 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
829 
830 	switch (mlx5_get_vport_access_method(ibdev)) {
831 	case MLX5_VPORT_ACCESS_METHOD_MAD:
832 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
833 
834 	case MLX5_VPORT_ACCESS_METHOD_HCA:
835 	case MLX5_VPORT_ACCESS_METHOD_NIC:
836 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
837 
838 	default:
839 		return -EINVAL;
840 	}
841 }
842 
843 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
844 				__be64 *node_guid)
845 {
846 	u64 tmp;
847 	int err;
848 
849 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
850 	case MLX5_VPORT_ACCESS_METHOD_MAD:
851 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
852 
853 	case MLX5_VPORT_ACCESS_METHOD_HCA:
854 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
855 		break;
856 
857 	case MLX5_VPORT_ACCESS_METHOD_NIC:
858 		err = mlx5_query_nic_vport_node_guid(dev->mdev, 0, false, &tmp);
859 		break;
860 
861 	default:
862 		return -EINVAL;
863 	}
864 
865 	if (!err)
866 		*node_guid = cpu_to_be64(tmp);
867 
868 	return err;
869 }
870 
871 struct mlx5_reg_node_desc {
872 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
873 };
874 
875 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
876 {
877 	struct mlx5_reg_node_desc in;
878 
879 	if (mlx5_use_mad_ifc(dev))
880 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
881 
882 	memset(&in, 0, sizeof(in));
883 
884 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
885 				    sizeof(struct mlx5_reg_node_desc),
886 				    MLX5_REG_NODE_DESC, 0, 0);
887 }
888 
889 static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
890 				struct mlx5_ib_query_device_resp *resp)
891 {
892 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
893 	u16 vport = mlx5_eswitch_manager_vport(mdev);
894 
895 	resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
896 								      vport);
897 	resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
898 }
899 
900 /*
901  * Calculate maximum SQ overhead across all QP types.
902  * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
903  * have smaller overhead than the types calculated below,
904  * so they are implicitly included.
905  */
906 static u32 mlx5_ib_calc_max_sq_overhead(void)
907 {
908 	u32 max_overhead_xrc, overhead_ud_lso, a, b;
909 
910 	/* XRC_INI */
911 	max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
912 	max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
913 	a = sizeof(struct mlx5_wqe_atomic_seg) +
914 	    sizeof(struct mlx5_wqe_raddr_seg);
915 	b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
916 	    sizeof(struct mlx5_mkey_seg) +
917 	    MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
918 	max_overhead_xrc += max(a, b);
919 
920 	/* UD with LSO */
921 	overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
922 	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
923 	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
924 	overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
925 
926 	return max(max_overhead_xrc, overhead_ud_lso);
927 }
928 
929 static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
930 {
931 	struct mlx5_core_dev *mdev = dev->mdev;
932 	u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
933 	u32 max_wqe_size;
934 	/* max QP overhead + 1 SGE, no inline, no special features */
935 	max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
936 		       sizeof(struct mlx5_wqe_data_seg);
937 
938 	max_wqe_size = roundup_pow_of_two(max_wqe_size);
939 
940 	max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
941 
942 	return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
943 }
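
/*
 * Worked example with illustrative numbers (the real segment sizes are
 * taken from the structs used above): if the worst-case overhead plus
 * one data segment rounds up to 256 bytes, that is four 64-byte basic
 * blocks (MLX5_SEND_WQE_BB), so with log_max_qp_sz = 15 the SQ can
 * hold (32768 * 64) / 256 = 8192 WQEs.
 */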
944 
945 static int mlx5_ib_query_device(struct ib_device *ibdev,
946 				struct ib_device_attr *props,
947 				struct ib_udata *uhw)
948 {
949 	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
950 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
951 	struct mlx5_core_dev *mdev = dev->mdev;
952 	int err = -ENOMEM;
953 	int max_sq_desc;
954 	int max_rq_sg;
955 	int max_sq_sg;
956 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
957 	bool raw_support = !mlx5_core_mp_enabled(mdev);
958 	struct mlx5_ib_query_device_resp resp = {};
959 	size_t resp_len;
960 	u64 max_tso;
961 
962 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
963 	if (uhw_outlen && uhw_outlen < resp_len)
964 		return -EINVAL;
965 
966 	resp.response_length = resp_len;
967 
968 	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
969 		return -EINVAL;
970 
971 	memset(props, 0, sizeof(*props));
972 	err = mlx5_query_system_image_guid(ibdev,
973 					   &props->sys_image_guid);
974 	if (err)
975 		return err;
976 
977 	props->max_pkeys = dev->pkey_table_len;
978 
979 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
980 	if (err)
981 		return err;
982 
983 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
984 		(fw_rev_min(dev->mdev) << 16) |
985 		fw_rev_sub(dev->mdev);
986 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
987 		IB_DEVICE_PORT_ACTIVE_EVENT		|
988 		IB_DEVICE_SYS_IMAGE_GUID		|
989 		IB_DEVICE_RC_RNR_NAK_GEN;
990 
991 	if (MLX5_CAP_GEN(mdev, pkv))
992 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
993 	if (MLX5_CAP_GEN(mdev, qkv))
994 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
995 	if (MLX5_CAP_GEN(mdev, apm))
996 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
997 	if (MLX5_CAP_GEN(mdev, xrc))
998 		props->device_cap_flags |= IB_DEVICE_XRC;
999 	if (MLX5_CAP_GEN(mdev, imaicl)) {
1000 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
1001 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
1002 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1003 		/* We support 'Gappy' memory registration too */
1004 		props->kernel_cap_flags |= IBK_SG_GAPS_REG;
1005 	}
1006 	/* IB_WR_REG_MR always requires changing the entity size with UMR */
1007 	if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1008 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
1009 	if (MLX5_CAP_GEN(mdev, sho)) {
1010 		props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
1011 		/* At this stage no support for signature handover */
1012 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
1013 				      IB_PROT_T10DIF_TYPE_2 |
1014 				      IB_PROT_T10DIF_TYPE_3;
1015 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
1016 				       IB_GUARD_T10DIF_CSUM;
1017 	}
1018 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
1019 		props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
1020 
1021 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
1022 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
1023 			/* Legacy bit to support old userspace libraries */
1024 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
1025 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
1026 		}
1027 
1028 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
1029 			props->raw_packet_caps |=
1030 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
1031 
1032 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
1033 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
1034 			if (max_tso) {
1035 				resp.tso_caps.max_tso = 1 << max_tso;
1036 				resp.tso_caps.supported_qpts |=
1037 					1 << IB_QPT_RAW_PACKET;
1038 				resp.response_length += sizeof(resp.tso_caps);
1039 			}
1040 		}
1041 
1042 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
1043 			resp.rss_caps.rx_hash_function =
1044 						MLX5_RX_HASH_FUNC_TOEPLITZ;
1045 			resp.rss_caps.rx_hash_fields_mask =
1046 						MLX5_RX_HASH_SRC_IPV4 |
1047 						MLX5_RX_HASH_DST_IPV4 |
1048 						MLX5_RX_HASH_SRC_IPV6 |
1049 						MLX5_RX_HASH_DST_IPV6 |
1050 						MLX5_RX_HASH_SRC_PORT_TCP |
1051 						MLX5_RX_HASH_DST_PORT_TCP |
1052 						MLX5_RX_HASH_SRC_PORT_UDP |
1053 						MLX5_RX_HASH_DST_PORT_UDP |
1054 						MLX5_RX_HASH_INNER;
1055 			resp.response_length += sizeof(resp.rss_caps);
1056 		}
1057 	} else {
1058 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
1059 			resp.response_length += sizeof(resp.tso_caps);
1060 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
1061 			resp.response_length += sizeof(resp.rss_caps);
1062 	}
1063 
1064 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
1065 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
1066 		props->kernel_cap_flags |= IBK_UD_TSO;
1067 	}
1068 
1069 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
1070 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
1071 	    raw_support)
1072 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
1073 
1074 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
1075 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
1076 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
1077 
1078 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1079 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
1080 	    raw_support) {
1081 		/* Legacy bit to support old userspace libraries */
1082 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
1083 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
1084 	}
1085 
1086 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
1087 		props->max_dm_size =
1088 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
1089 	}
1090 
1091 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
1092 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
1093 
1094 	if (MLX5_CAP_GEN(mdev, end_pad))
1095 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
1096 
1097 	props->vendor_part_id	   = mdev->pdev->device;
1098 	props->hw_ver		   = mdev->pdev->revision;
1099 
1100 	props->max_mr_size	   = ~0ull;
1101 	props->page_size_cap	   = ~(min_page_size - 1);
1102 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
1103 	props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
1104 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
1105 		     sizeof(struct mlx5_wqe_data_seg);
1106 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
1107 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
1108 		     sizeof(struct mlx5_wqe_raddr_seg)) /
1109 		sizeof(struct mlx5_wqe_data_seg);
1110 	props->max_send_sge = max_sq_sg;
1111 	props->max_recv_sge = max_rq_sg;
1112 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
1113 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
1114 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1115 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1116 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1117 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1118 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1119 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1120 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1121 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1122 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
1123 	props->max_srq_sge	   = max_rq_sg - 1;
1124 	props->max_fast_reg_page_list_len =
1125 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1126 	props->max_pi_fast_reg_page_list_len =
1127 		props->max_fast_reg_page_list_len / 2;
1128 	props->max_sgl_rd =
1129 		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
1130 	get_atomic_caps_qp(dev, props);
1131 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
1132 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1133 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1134 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1135 					   props->max_mcast_grp;
1136 	props->max_ah = INT_MAX;
1137 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1138 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1139 
1140 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1141 		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1142 			props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
1143 		props->odp_caps = dev->odp_caps;
1144 		if (!uhw) {
1145 			/* ODP for kernel QPs is not implemented for receive
1146 			 * WQEs and SRQ WQEs
1147 			 */
1148 			props->odp_caps.per_transport_caps.rc_odp_caps &=
1149 				~(IB_ODP_SUPPORT_READ |
1150 				  IB_ODP_SUPPORT_SRQ_RECV);
1151 			props->odp_caps.per_transport_caps.uc_odp_caps &=
1152 				~(IB_ODP_SUPPORT_READ |
1153 				  IB_ODP_SUPPORT_SRQ_RECV);
1154 			props->odp_caps.per_transport_caps.ud_odp_caps &=
1155 				~(IB_ODP_SUPPORT_READ |
1156 				  IB_ODP_SUPPORT_SRQ_RECV);
1157 			props->odp_caps.per_transport_caps.xrc_odp_caps &=
1158 				~(IB_ODP_SUPPORT_READ |
1159 				  IB_ODP_SUPPORT_SRQ_RECV);
1160 		}
1161 	}
1162 
1163 	if (mlx5_core_is_vf(mdev))
1164 		props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
1165 
1166 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
1167 	    IB_LINK_LAYER_ETHERNET && raw_support) {
1168 		props->rss_caps.max_rwq_indirection_tables =
1169 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1170 		props->rss_caps.max_rwq_indirection_table_size =
1171 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1172 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1173 		props->max_wq_type_rq =
1174 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1175 	}
1176 
1177 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
1178 		props->tm_caps.max_num_tags =
1179 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1180 		props->tm_caps.max_ops =
1181 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1182 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1183 	}
1184 
1185 	if (MLX5_CAP_GEN(mdev, tag_matching) &&
1186 	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1187 		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1188 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1189 	}
1190 
1191 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1192 		props->cq_caps.max_cq_moderation_count =
1193 						MLX5_MAX_CQ_COUNT;
1194 		props->cq_caps.max_cq_moderation_period =
1195 						MLX5_MAX_CQ_PERIOD;
1196 	}
1197 
1198 	if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1199 		resp.response_length += sizeof(resp.cqe_comp_caps);
1200 
1201 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1202 			resp.cqe_comp_caps.max_num =
1203 				MLX5_CAP_GEN(dev->mdev,
1204 					     cqe_compression_max_num);
1205 
1206 			resp.cqe_comp_caps.supported_format =
1207 				MLX5_IB_CQE_RES_FORMAT_HASH |
1208 				MLX5_IB_CQE_RES_FORMAT_CSUM;
1209 
1210 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1211 				resp.cqe_comp_caps.supported_format |=
1212 					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1213 		}
1214 	}
1215 
1216 	if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1217 	    raw_support) {
1218 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1219 		    MLX5_CAP_GEN(mdev, qos)) {
1220 			resp.packet_pacing_caps.qp_rate_limit_max =
1221 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1222 			resp.packet_pacing_caps.qp_rate_limit_min =
1223 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1224 			resp.packet_pacing_caps.supported_qpts |=
1225 				1 << IB_QPT_RAW_PACKET;
1226 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1227 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1228 				resp.packet_pacing_caps.cap_flags |=
1229 					MLX5_IB_PP_SUPPORT_BURST;
1230 		}
1231 		resp.response_length += sizeof(resp.packet_pacing_caps);
1232 	}
1233 
1234 	if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1235 	    uhw_outlen) {
1236 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1237 			resp.mlx5_ib_support_multi_pkt_send_wqes =
1238 				MLX5_IB_ALLOW_MPW;
1239 
1240 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1241 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
1242 				MLX5_IB_SUPPORT_EMPW;
1243 
1244 		resp.response_length +=
1245 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1246 	}
1247 
1248 	if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1249 		resp.response_length += sizeof(resp.flags);
1250 
1251 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1252 			resp.flags |=
1253 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1254 
1255 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
1256 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1257 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
1258 			resp.flags |=
1259 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1260 
1261 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1262 
1263 		if (MLX5_CAP_GEN_2(mdev, dp_ordering_force) &&
1264 		    (MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc) ||
1265 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc) ||
1266 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc) ||
1267 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud) ||
1268 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc)))
1269 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP;
1270 	}
1271 
1272 	if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1273 		resp.response_length += sizeof(resp.sw_parsing_caps);
1274 		if (MLX5_CAP_ETH(mdev, swp)) {
1275 			resp.sw_parsing_caps.sw_parsing_offloads |=
1276 				MLX5_IB_SW_PARSING;
1277 
1278 			if (MLX5_CAP_ETH(mdev, swp_csum))
1279 				resp.sw_parsing_caps.sw_parsing_offloads |=
1280 					MLX5_IB_SW_PARSING_CSUM;
1281 
1282 			if (MLX5_CAP_ETH(mdev, swp_lso))
1283 				resp.sw_parsing_caps.sw_parsing_offloads |=
1284 					MLX5_IB_SW_PARSING_LSO;
1285 
1286 			if (resp.sw_parsing_caps.sw_parsing_offloads)
1287 				resp.sw_parsing_caps.supported_qpts =
1288 					BIT(IB_QPT_RAW_PACKET);
1289 		}
1290 	}
1291 
1292 	if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1293 	    raw_support) {
1294 		resp.response_length += sizeof(resp.striding_rq_caps);
1295 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
1296 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1297 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1298 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1299 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1300 			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1301 				resp.striding_rq_caps
1302 					.min_single_wqe_log_num_of_strides =
1303 					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1304 			else
1305 				resp.striding_rq_caps
1306 					.min_single_wqe_log_num_of_strides =
1307 					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1308 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1309 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1310 			resp.striding_rq_caps.supported_qpts =
1311 				BIT(IB_QPT_RAW_PACKET);
1312 		}
1313 	}
1314 
1315 	if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1316 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
1317 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1318 			resp.tunnel_offloads_caps |=
1319 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1320 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1321 			resp.tunnel_offloads_caps |=
1322 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1323 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1324 			resp.tunnel_offloads_caps |=
1325 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
1326 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1327 			resp.tunnel_offloads_caps |=
1328 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1329 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1330 			resp.tunnel_offloads_caps |=
1331 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1332 	}
1333 
1334 	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
1335 		resp.response_length += sizeof(resp.dci_streams_caps);
1336 
1337 		resp.dci_streams_caps.max_log_num_concurent =
1338 			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1339 
1340 		resp.dci_streams_caps.max_log_num_errored =
1341 			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1342 	}
1343 
1344 	if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
1345 		resp.response_length += sizeof(resp.reserved);
1346 
1347 	if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
1348 		struct mlx5_eswitch *esw = mdev->priv.eswitch;
1349 
1350 		resp.response_length += sizeof(resp.reg_c0);
1351 
1352 		if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
1353 		    mlx5_eswitch_vport_match_metadata_enabled(esw))
1354 			fill_esw_mgr_reg_c0(mdev, &resp);
1355 	}
1356 
1357 	if (uhw_outlen) {
1358 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1359 
1360 		if (err)
1361 			return err;
1362 	}
1363 
1364 	return 0;
1365 }
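
/*
 * The uhw extension protocol above, in miniature ("some_caps" is a
 * placeholder, not a real field): each optional capability block is
 * emitted only when the user buffer can hold it,
 *
 *	if (offsetofend(typeof(resp), some_caps) <= uhw_outlen)
 *		resp.response_length += sizeof(resp.some_caps);
 *
 * so older userspace simply receives a shorter, prefix-compatible
 * response.
 */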
1366 
1367 static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1368 				   u8 *ib_width)
1369 {
1370 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1371 
1372 	if (active_width & MLX5_PTYS_WIDTH_1X)
1373 		*ib_width = IB_WIDTH_1X;
1374 	else if (active_width & MLX5_PTYS_WIDTH_2X)
1375 		*ib_width = IB_WIDTH_2X;
1376 	else if (active_width & MLX5_PTYS_WIDTH_4X)
1377 		*ib_width = IB_WIDTH_4X;
1378 	else if (active_width & MLX5_PTYS_WIDTH_8X)
1379 		*ib_width = IB_WIDTH_8X;
1380 	else if (active_width & MLX5_PTYS_WIDTH_12X)
1381 		*ib_width = IB_WIDTH_12X;
1382 	else {
1383 		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1384 			    active_width);
1385 		*ib_width = IB_WIDTH_4X;
1386 	}
1389 }
1390 
1391 static int mlx5_mtu_to_ib_mtu(int mtu)
1392 {
1393 	switch (mtu) {
1394 	case 256: return 1;
1395 	case 512: return 2;
1396 	case 1024: return 3;
1397 	case 2048: return 4;
1398 	case 4096: return 5;
1399 	default:
1400 		pr_warn("invalid mtu\n");
1401 		return -1;
1402 	}
1403 }
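
/*
 * Equivalently, for the valid power-of-two MTUs above the mapping is
 * ilog2(mtu) - 7, i.e. 256 -> 1 ... 4096 -> 5, matching enum ib_mtu.
 */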
1404 
1405 enum ib_max_vl_num {
1406 	__IB_MAX_VL_0		= 1,
1407 	__IB_MAX_VL_0_1		= 2,
1408 	__IB_MAX_VL_0_3		= 3,
1409 	__IB_MAX_VL_0_7		= 4,
1410 	__IB_MAX_VL_0_14	= 5,
1411 };
1412 
1413 enum mlx5_vl_hw_cap {
1414 	MLX5_VL_HW_0	= 1,
1415 	MLX5_VL_HW_0_1	= 2,
1416 	MLX5_VL_HW_0_2	= 3,
1417 	MLX5_VL_HW_0_3	= 4,
1418 	MLX5_VL_HW_0_4	= 5,
1419 	MLX5_VL_HW_0_5	= 6,
1420 	MLX5_VL_HW_0_6	= 7,
1421 	MLX5_VL_HW_0_7	= 8,
1422 	MLX5_VL_HW_0_14	= 15
1423 };
1424 
1425 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1426 				u8 *max_vl_num)
1427 {
1428 	switch (vl_hw_cap) {
1429 	case MLX5_VL_HW_0:
1430 		*max_vl_num = __IB_MAX_VL_0;
1431 		break;
1432 	case MLX5_VL_HW_0_1:
1433 		*max_vl_num = __IB_MAX_VL_0_1;
1434 		break;
1435 	case MLX5_VL_HW_0_3:
1436 		*max_vl_num = __IB_MAX_VL_0_3;
1437 		break;
1438 	case MLX5_VL_HW_0_7:
1439 		*max_vl_num = __IB_MAX_VL_0_7;
1440 		break;
1441 	case MLX5_VL_HW_0_14:
1442 		*max_vl_num = __IB_MAX_VL_0_14;
1443 		break;
1444 
1445 	default:
1446 		return -EINVAL;
1447 	}
1448 
1449 	return 0;
1450 }
1451 
1452 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1453 			       struct ib_port_attr *props)
1454 {
1455 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1456 	struct mlx5_core_dev *mdev = dev->mdev;
1457 	struct mlx5_hca_vport_context *rep;
1458 	u8 vl_hw_cap, plane_index = 0;
1459 	u16 max_mtu;
1460 	u16 oper_mtu;
1461 	int err;
1462 	u16 ib_link_width_oper;
1463 
1464 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1465 	if (!rep) {
1466 		err = -ENOMEM;
1467 		goto out;
1468 	}
1469 
1470 	/* props being zeroed by the caller, avoid zeroing it here */
1471 	/* props is zeroed by the caller; avoid zeroing it here */
1472 	if (ibdev->type == RDMA_DEVICE_TYPE_SMI) {
1473 		plane_index = port;
1474 		port = smi_to_native_portnum(dev, port);
1475 	}
1476 
1477 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1478 	if (err)
1479 		goto out;
1480 
1481 	props->lid		= rep->lid;
1482 	props->lmc		= rep->lmc;
1483 	props->sm_lid		= rep->sm_lid;
1484 	props->sm_sl		= rep->sm_sl;
1485 	props->state		= rep->vport_state;
1486 	props->phys_state	= rep->port_physical_state;
1487 
1488 	props->port_cap_flags = rep->cap_mask1;
1489 	if (dev->num_plane) {
1490 		props->port_cap_flags |= IB_PORT_SM_DISABLED;
1491 		props->port_cap_flags &= ~IB_PORT_SM;
1492 	} else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
1493 		props->port_cap_flags &= ~IB_PORT_CM_SUP;
1494 
1495 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1496 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1497 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1498 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1499 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1500 	props->subnet_timeout	= rep->subnet_timeout;
1501 	props->init_type_reply	= rep->init_type_reply;
1502 
1503 	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1504 		props->port_cap_flags2 = rep->cap_mask2;
1505 
1506 	err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1507 				      &props->active_speed, port, plane_index);
1508 	if (err)
1509 		goto out;
1510 
1511 	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1512 
1513 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1514 
1515 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1516 
1517 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1518 
1519 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1520 
1521 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1522 	if (err)
1523 		goto out;
1524 
1525 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1526 				   &props->max_vl_num);
1527 out:
1528 	kfree(rep);
1529 	return err;
1530 }
1531 
1532 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1533 		       struct ib_port_attr *props)
1534 {
1535 	unsigned int count;
1536 	int ret;
1537 
1538 	switch (mlx5_get_vport_access_method(ibdev)) {
1539 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1540 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1541 		break;
1542 
1543 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1544 		ret = mlx5_query_hca_port(ibdev, port, props);
1545 		break;
1546 
1547 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1548 		ret = mlx5_query_port_roce(ibdev, port, props);
1549 		break;
1550 
1551 	default:
1552 		ret = -EINVAL;
1553 	}
1554 
1555 	if (!ret && props) {
1556 		struct mlx5_ib_dev *dev = to_mdev(ibdev);
1557 		struct mlx5_core_dev *mdev;
1558 		bool put_mdev = true;
1559 
1560 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1561 		if (!mdev) {
1562 			/* If the port isn't affiliated yet, query the master.
1563 			 * The master and slave will have the same values.
1564 			 */
1565 			mdev = dev->mdev;
1566 			port = 1;
1567 			put_mdev = false;
1568 		}
1569 		count = mlx5_core_reserved_gids_count(mdev);
1570 		if (put_mdev)
1571 			mlx5_ib_put_native_port_mdev(dev, port);
1572 		props->gid_tbl_len -= count;
1573 	}
1574 	return ret;
1575 }
1576 
1577 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1578 				  struct ib_port_attr *props)
1579 {
1580 	return mlx5_query_port_roce(ibdev, port, props);
1581 }
1582 
1583 static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1584 				  u16 *pkey)
1585 {
1586 	/* Default special Pkey for representor device port as per the
1587 	 * IB specification 1.3 section 10.9.1.2.
1588 	 */
1589 	*pkey = 0xffff;
1590 	return 0;
1591 }
1592 
1593 static int mlx5_ib_query_port_speed_from_port(struct mlx5_ib_dev *dev,
1594 					      u32 port_num, u64 *speed)
1595 {
1596 	struct ib_port_speed_info speed_info;
1597 	struct ib_port_attr attr = {};
1598 	int err;
1599 
1600 	err = mlx5_ib_query_port(&dev->ib_dev, port_num, &attr);
1601 	if (err)
1602 		return err;
1603 
1604 	if (attr.state == IB_PORT_DOWN) {
1605 		*speed = 0;
1606 		return 0;
1607 	}
1608 
1609 	err = ib_port_attr_to_speed_info(&attr, &speed_info);
1610 	if (err)
1611 		return err;
1612 
1613 	*speed = speed_info.rate;
1614 	return 0;
1615 }
1616 
1617 static int mlx5_ib_query_port_speed_from_vport(struct mlx5_core_dev *mdev,
1618 					       u8 op_mod, u16 vport,
1619 					       u8 other_vport, u64 *speed,
1620 					       struct mlx5_ib_dev *dev,
1621 					       u32 port_num)
1622 {
1623 	u32 max_tx_speed;
1624 	int err;
1625 
1626 	err = mlx5_query_vport_max_tx_speed(mdev, op_mod, vport, other_vport,
1627 					    &max_tx_speed);
1628 	if (err)
1629 		return err;
1630 
1631 	if (max_tx_speed == 0)
1632 		/* Value 0 indicates the field is not supported; fall back */
1633 		return mlx5_ib_query_port_speed_from_port(dev, port_num,
1634 							  speed);
1635 
1636 	*speed = max_tx_speed;
1637 	return 0;
1638 }
1639 
1640 static int mlx5_ib_query_port_speed_from_bond(struct mlx5_ib_dev *dev,
1641 					      u32 port_num, u64 *speed)
1642 {
1643 	struct mlx5_core_dev *mdev = dev->mdev;
1644 	u32 bond_speed;
1645 	int err;
1646 
1647 	err = mlx5_lag_query_bond_speed(mdev, &bond_speed);
1648 	if (err)
1649 		return err;
1650 
1651 	*speed = bond_speed / MLX5_MAX_TX_SPEED_UNIT;
1652 
1653 	return 0;
1654 }
1655 
1656 static int mlx5_ib_query_port_speed_non_rep(struct mlx5_ib_dev *dev,
1657 					    u32 port_num, u64 *speed)
1658 {
1659 	u16 op_mod = MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT;
1660 
1661 	if (mlx5_lag_is_roce(dev->mdev))
1662 		return mlx5_ib_query_port_speed_from_bond(dev, port_num,
1663 							  speed);
1664 
1665 	return mlx5_ib_query_port_speed_from_vport(dev->mdev, op_mod, 0, false,
1666 						   speed, dev, port_num);
1667 }
1668 
1669 static int mlx5_ib_query_port_speed_rep(struct mlx5_ib_dev *dev, u32 port_num,
1670 					u64 *speed)
1671 {
1672 	struct mlx5_eswitch_rep *rep;
1673 	struct mlx5_core_dev *mdev;
1674 	u16 op_mod;
1675 
1676 	if (!dev->port[port_num - 1].rep) {
1677 		mlx5_ib_warn(dev, "Representor doesn't exist for port %u\n",
1678 			     port_num);
1679 		return -EINVAL;
1680 	}
1681 
1682 	rep = dev->port[port_num - 1].rep;
1683 	mdev = mlx5_eswitch_get_core_dev(rep->esw);
1684 	if (!mdev)
1685 		return -ENODEV;
1686 
1687 	if (rep->vport == MLX5_VPORT_UPLINK) {
1688 		if (mlx5_lag_is_sriov(mdev))
1689 			return mlx5_ib_query_port_speed_from_bond(dev,
1690 								  port_num,
1691 								  speed);
1692 
1693 		return mlx5_ib_query_port_speed_from_port(dev, port_num,
1694 							  speed);
1695 	}
1696 
1697 	op_mod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
1698 	return mlx5_ib_query_port_speed_from_vport(dev->mdev, op_mod,
1699 						   rep->vport, true, speed, dev,
1700 						   port_num);
1701 }
1702 
1703 int mlx5_ib_query_port_speed(struct ib_device *ibdev, u32 port_num, u64 *speed)
1704 {
1705 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1706 
1707 	if (mlx5_ib_port_link_layer(ibdev, port_num) ==
1708 	    IB_LINK_LAYER_INFINIBAND || mlx5_core_mp_enabled(dev->mdev))
1709 		return mlx5_ib_query_port_speed_from_port(dev, port_num, speed);
1710 	else if (!dev->is_rep)
1711 		return mlx5_ib_query_port_speed_non_rep(dev, port_num, speed);
1712 	else
1713 		return mlx5_ib_query_port_speed_rep(dev, port_num, speed);
1714 }
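
/*
 * Dispatch summary for the branches above:
 *
 *	IB link layer or multiport enabled -> derive from port attributes
 *	RoCE, native device (non-rep)      -> vport max TX / bond speed
 *	RoCE, switchdev representor        -> per-vport or uplink handling
 */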
1715 
1716 static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1717 			     union ib_gid *gid)
1718 {
1719 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1720 	struct mlx5_core_dev *mdev = dev->mdev;
1721 
1722 	switch (mlx5_get_vport_access_method(ibdev)) {
1723 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1724 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1725 
1726 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1727 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1728 
1729 	default:
1730 		return -EINVAL;
1731 	}
1733 }
1734 
1735 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1736 				   u16 index, u16 *pkey)
1737 {
1738 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1739 	struct mlx5_core_dev *mdev;
1740 	bool put_mdev = true;
1741 	u32 mdev_port_num;
1742 	int err;
1743 
1744 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1745 	if (!mdev) {
1746 		/* The port isn't affiliated yet, get the PKey from the master
1747 		 * port. For RoCE the PKey tables will be the same.
1748 		 */
1749 		put_mdev = false;
1750 		mdev = dev->mdev;
1751 		mdev_port_num = 1;
1752 	}
1753 
1754 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1755 					index, pkey);
1756 	if (put_mdev)
1757 		mlx5_ib_put_native_port_mdev(dev, port);
1758 
1759 	return err;
1760 }
1761 
1762 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1763 			      u16 *pkey)
1764 {
1765 	switch (mlx5_get_vport_access_method(ibdev)) {
1766 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1767 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1768 
1769 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1770 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1771 		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1772 	default:
1773 		return -EINVAL;
1774 	}
1775 }
1776 
1777 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1778 				 struct ib_device_modify *props)
1779 {
1780 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1781 	struct mlx5_reg_node_desc in;
1782 	struct mlx5_reg_node_desc out;
1783 	int err;
1784 
1785 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1786 		return -EOPNOTSUPP;
1787 
1788 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1789 		return 0;
1790 
1791 	/*
1792 	 * If possible, pass the node desc to FW, so it can generate
1793 	 * a trap 144 (local node description change). If the cmd fails, just ignore it.
1794 	 */
1795 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1796 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1797 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1798 	if (err)
1799 		return err;
1800 
1801 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1802 
1803 	return err;
1804 }
1805 
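/*
 * Modify port capability bits through the HCA vport context: query the
 * current context, verify every bit in @mask is modifiable according to
 * cap_mask1_perm, then write the new cap_mask1 back on the native port.
 */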
1806 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1807 				u32 value)
1808 {
1809 	struct mlx5_hca_vport_context ctx = {};
1810 	struct mlx5_core_dev *mdev;
1811 	u32 mdev_port_num;
1812 	int err;
1813 
1814 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1815 	if (!mdev)
1816 		return -ENODEV;
1817 
1818 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1819 	if (err)
1820 		goto out;
1821 
1822 	if (~ctx.cap_mask1_perm & mask) {
1823 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1824 			     mask, ctx.cap_mask1_perm);
1825 		err = -EINVAL;
1826 		goto out;
1827 	}
1828 
1829 	ctx.cap_mask1 = value;
1830 	ctx.cap_mask1_perm = mask;
1831 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1832 						 0, &ctx);
1833 
1834 out:
1835 	mlx5_ib_put_native_port_mdev(dev, port_num);
1836 
1837 	return err;
1838 }
1839 
1840 static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1841 			       struct ib_port_modify *props)
1842 {
1843 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1844 	struct ib_port_attr attr;
1845 	u32 tmp;
1846 	int err;
1847 	u32 change_mask;
1848 	u32 value;
1849 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1850 		      IB_LINK_LAYER_INFINIBAND);
1851 
1852 	/* The CM layer calls ib_modify_port() regardless of the link layer. For
1853 	 * Ethernet ports, Q_Key violations and port capabilities are meaningless.
1854 	 */
1855 	if (!is_ib)
1856 		return 0;
1857 
1858 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1859 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1860 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1861 		return set_port_caps_atomic(dev, port, change_mask, value);
1862 	}
1863 
1864 	mutex_lock(&dev->cap_mask_mutex);
1865 
1866 	err = ib_query_port(ibdev, port, &attr);
1867 	if (err)
1868 		goto out;
1869 
1870 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1871 		~props->clr_port_cap_mask;
1872 
1873 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1874 
1875 out:
1876 	mutex_unlock(&dev->cap_mask_mutex);
1877 	return err;
1878 }
1879 
1880 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1881 {
1882 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1883 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1884 }
1885 
1886 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1887 {
1888 	/* A large system page without 4K UAR support might limit the dynamic size */
1889 	if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1890 		return MLX5_MIN_DYN_BFREGS;
1891 
1892 	return MLX5_MAX_DYN_BFREGS;
1893 }
1894 
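/*
 * Work out the bfreg (blue-flame register) layout for a new ucontext: round
 * the requested static bfregs up to whole system pages and append room for
 * dynamic bfregs, recording the resulting page counts in @bfregi.
 */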
1895 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1896 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1897 			     struct mlx5_bfreg_info *bfregi)
1898 {
1899 	int uars_per_sys_page;
1900 	int bfregs_per_sys_page;
1901 	int ref_bfregs = req->total_num_bfregs;
1902 
1903 	if (req->total_num_bfregs == 0)
1904 		return -EINVAL;
1905 
1906 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1907 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1908 
1909 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1910 		return -ENOMEM;
1911 
1912 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1913 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1914 	/* This holds the static allocation requested by the user */
1915 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1916 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1917 		return -EINVAL;
1918 
1919 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1920 	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1921 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1922 	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1923 
1924 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1925 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1926 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1927 		    req->total_num_bfregs, bfregi->total_num_bfregs,
1928 		    bfregi->num_sys_pages);
1929 
1930 	return 0;
1931 }
1932 
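/*
 * Allocate a firmware UAR for every static system page of the context;
 * dynamic pages are marked invalid here and only allocated on demand at
 * mmap time. On failure, the UARs already allocated are released.
 */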
1933 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1934 {
1935 	struct mlx5_bfreg_info *bfregi;
1936 	int err;
1937 	int i;
1938 
1939 	bfregi = &context->bfregi;
1940 	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1941 		err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1942 					 context->devx_uid);
1943 		if (err)
1944 			goto error;
1945 
1946 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1947 	}
1948 
1949 	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1950 		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1951 
1952 	return 0;
1953 
1954 error:
1955 	for (--i; i >= 0; i--)
1956 		if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1957 					 context->devx_uid))
1958 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1959 
1960 	return err;
1961 }
1962 
1963 static void deallocate_uars(struct mlx5_ib_dev *dev,
1964 			    struct mlx5_ib_ucontext *context)
1965 {
1966 	struct mlx5_bfreg_info *bfregi;
1967 	int i;
1968 
1969 	bfregi = &context->bfregi;
1970 	for (i = 0; i < bfregi->num_sys_pages; i++)
1971 		if (i < bfregi->num_static_sys_pages ||
1972 		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1973 			mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1974 					     context->devx_uid);
1975 }
1976 
1977 static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
1978 				struct mlx5_core_dev *slave,
1979 				struct mlx5_ib_lb_state *lb_state)
1980 {
1981 	int err;
1982 
1983 	err = mlx5_nic_vport_update_local_lb(master, true);
1984 	if (err)
1985 		return err;
1986 
1987 	err = mlx5_nic_vport_update_local_lb(slave, true);
1988 	if (err)
1989 		goto out;
1990 
1991 	lb_state->force_enable = true;
1992 	return 0;
1993 
1994 out:
1995 	mlx5_nic_vport_update_local_lb(master, false);
1996 	return err;
1997 }
1998 
1999 static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
2000 				  struct mlx5_core_dev *slave,
2001 				  struct mlx5_ib_lb_state *lb_state)
2002 {
2003 	mlx5_nic_vport_update_local_lb(slave, false);
2004 	mlx5_nic_vport_update_local_lb(master, false);
2005 
2006 	lb_state->force_enable = false;
2007 }
2008 
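/*
 * Local loopback is reference counted: judging by the thresholds below, it
 * is enabled once a second transport domain is allocated or the first QP
 * that needs it is created, and mlx5_ib_disable_lb() turns it back off when
 * both counts drop below those thresholds again.
 */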
2009 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
2010 {
2011 	int err = 0;
2012 
2013 	if (dev->lb.force_enable)
2014 		return 0;
2015 
2016 	mutex_lock(&dev->lb.mutex);
2017 	if (td)
2018 		dev->lb.user_td++;
2019 	if (qp)
2020 		dev->lb.qps++;
2021 
2022 	if (dev->lb.user_td == 2 ||
2023 	    dev->lb.qps == 1) {
2024 		if (!dev->lb.enabled) {
2025 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
2026 			dev->lb.enabled = true;
2027 		}
2028 	}
2029 
2030 	mutex_unlock(&dev->lb.mutex);
2031 
2032 	return err;
2033 }
2034 
2035 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
2036 {
2037 	if (dev->lb.force_enable)
2038 		return;
2039 
2040 	mutex_lock(&dev->lb.mutex);
2041 	if (td)
2042 		dev->lb.user_td--;
2043 	if (qp)
2044 		dev->lb.qps--;
2045 
2046 	if (dev->lb.user_td == 1 &&
2047 	    dev->lb.qps == 0) {
2048 		if (dev->lb.enabled) {
2049 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
2050 			dev->lb.enabled = false;
2051 		}
2052 	}
2053 
2054 	mutex_unlock(&dev->lb.mutex);
2055 }
2056 
2057 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
2058 					  u16 uid)
2059 {
2060 	int err;
2061 
2062 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
2063 		return 0;
2064 
2065 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
2066 	if (err)
2067 		return err;
2068 
2069 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
2070 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
2071 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
2072 		return err;
2073 
2074 	return mlx5_ib_enable_lb(dev, true, false);
2075 }
2076 
2077 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
2078 					     u16 uid)
2079 {
2080 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
2081 		return;
2082 
2083 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
2084 
2085 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
2086 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
2087 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
2088 		return;
2089 
2090 	mlx5_ib_disable_lb(dev, true, false);
2091 }
2092 
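/*
 * Fill the alloc_ucontext response with everything userspace needs to drive
 * the device: bfreg layout, WQE and CQE limits, UAR geometry, clock info and
 * the supported comp_mask feature bits.
 */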
2093 static int set_ucontext_resp(struct ib_ucontext *uctx,
2094 			     struct mlx5_ib_alloc_ucontext_resp *resp)
2095 {
2096 	struct ib_device *ibdev = uctx->device;
2097 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2098 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
2099 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2100 
2101 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
2102 		resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
2103 		resp->comp_mask |=
2104 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
2105 	}
2106 
2107 	resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
2108 	if (mlx5_wc_support_get(dev->mdev))
2109 		resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
2110 						      log_bf_reg_size);
2111 	resp->cache_line_size = cache_line_size();
2112 	resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
2113 	resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
2114 	resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
2115 	resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
2116 	resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
2117 	resp->cqe_version = context->cqe_version;
2118 	resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2119 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
2120 	resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2121 					MLX5_CAP_GEN(dev->mdev,
2122 						     num_of_uars_per_page) : 1;
2123 	resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
2124 			bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
2125 	resp->num_ports = dev->num_ports;
2126 	resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
2127 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
2128 
2129 	if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
2130 		mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
2131 		resp->eth_min_inline++;
2132 	}
2133 
2134 	if (dev->mdev->clock_info)
2135 		resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
2136 
2137 	/*
2138 	 * We don't want to expose information from the PCI bar that is located
2139 	 * after 4096 bytes, so if the arch only supports larger pages, let's
2140 	 * pretend we don't support reading the HCA's core clock. This is also
2141 	 * enforced by the mmap function.
2142 	 */
2143 	if (PAGE_SIZE <= 4096) {
2144 		resp->comp_mask |=
2145 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
2146 		resp->hca_core_clock_offset =
2147 			offsetof(struct mlx5_init_seg,
2148 				 internal_timer_h) % PAGE_SIZE;
2149 	}
2150 
2151 	if (MLX5_CAP_GEN(dev->mdev, ece_support))
2152 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
2153 
2154 	if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
2155 	    rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
2156 	    rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
2157 		resp->comp_mask |=
2158 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
2159 
2160 	resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
2161 
2162 	if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
2163 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
2164 
2165 	resp->comp_mask |=
2166 		MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
2167 
2168 	return 0;
2169 }
2170 
2171 static bool uctx_rdma_ctrl_is_enabled(u64 enabled_caps)
2172 {
2173 	return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL) ||
2174 	       UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
2175 }
2176 
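/*
 * Allocate a user context: parse the v0/v2 request, optionally create a
 * DEVX UID (registering it as privileged when RDMA control caps are
 * enabled), set up bfregs and UARs unless the library allocates UARs
 * dynamically, allocate a transport domain and report the limits back.
 */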
2177 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
2178 				  struct ib_udata *udata)
2179 {
2180 	struct ib_device *ibdev = uctx->device;
2181 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2182 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
2183 	struct mlx5_ib_alloc_ucontext_resp resp = {};
2184 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
2185 	struct mlx5_bfreg_info *bfregi;
2186 	int ver;
2187 	int err;
2188 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
2189 				     max_cqe_version);
2190 	bool lib_uar_4k;
2191 	bool lib_uar_dyn;
2192 
2193 	if (!dev->ib_active)
2194 		return -EAGAIN;
2195 
2196 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
2197 		ver = 0;
2198 	else if (udata->inlen >= min_req_v2)
2199 		ver = 2;
2200 	else
2201 		return -EINVAL;
2202 
2203 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
2204 	if (err)
2205 		return err;
2206 
2207 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
2208 		return -EOPNOTSUPP;
2209 
2210 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
2211 		return -EOPNOTSUPP;
2212 
2213 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
2214 				    MLX5_NON_FP_BFREGS_PER_UAR);
2215 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
2216 		return -EINVAL;
2217 
2218 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
2219 		err = mlx5_ib_devx_create(dev, true, uctx->enabled_caps);
2220 		if (err < 0)
2221 			goto out_ctx;
2222 		context->devx_uid = err;
2223 
2224 		if (uctx_rdma_ctrl_is_enabled(uctx->enabled_caps)) {
2225 			err = mlx5_cmd_add_privileged_uid(dev->mdev,
2226 							  context->devx_uid);
2227 			if (err)
2228 				goto out_devx;
2229 		}
2230 	}
2231 
2232 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
2233 	lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
2234 	bfregi = &context->bfregi;
2235 
2236 	if (lib_uar_dyn) {
2237 		bfregi->lib_uar_dyn = lib_uar_dyn;
2238 		goto uar_done;
2239 	}
2240 
2241 	/* updates req->total_num_bfregs */
2242 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
2243 	if (err)
2244 		goto out_ucap;
2245 
2246 	mutex_init(&bfregi->lock);
2247 	bfregi->lib_uar_4k = lib_uar_4k;
2248 	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
2249 				GFP_KERNEL);
2250 	if (!bfregi->count) {
2251 		err = -ENOMEM;
2252 		goto out_ucap;
2253 	}
2254 
2255 	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
2256 				    sizeof(*bfregi->sys_pages),
2257 				    GFP_KERNEL);
2258 	if (!bfregi->sys_pages) {
2259 		err = -ENOMEM;
2260 		goto out_count;
2261 	}
2262 
2263 	err = allocate_uars(dev, context);
2264 	if (err)
2265 		goto out_sys_pages;
2266 
2267 uar_done:
2268 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
2269 					     context->devx_uid);
2270 	if (err)
2271 		goto out_uars;
2272 
2273 	INIT_LIST_HEAD(&context->db_page_list);
2274 	mutex_init(&context->db_page_mutex);
2275 
2276 	context->cqe_version = min_t(__u8,
2277 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
2278 				 req.max_cqe_version);
2279 
2280 	err = set_ucontext_resp(uctx, &resp);
2281 	if (err)
2282 		goto out_mdev;
2283 
2284 	resp.response_length = min(udata->outlen, sizeof(resp));
2285 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
2286 	if (err)
2287 		goto out_mdev;
2288 
2289 	bfregi->ver = ver;
2290 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
2291 	context->lib_caps = req.lib_caps;
2292 	print_lib_caps(dev, context->lib_caps);
2293 
2294 	if (mlx5_ib_lag_should_assign_affinity(dev)) {
2295 		u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
2296 
2297 		atomic_set(&context->tx_port_affinity,
2298 			   atomic_add_return(
2299 				   1, &dev->port[port].roce.tx_port_affinity));
2300 	}
2301 
2302 	return 0;
2303 
2304 out_mdev:
2305 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2306 
2307 out_uars:
2308 	deallocate_uars(dev, context);
2309 
2310 out_sys_pages:
2311 	kfree(bfregi->sys_pages);
2312 
2313 out_count:
2314 	kfree(bfregi->count);
2315 
2316 out_ucap:
2317 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX &&
2318 	    uctx_rdma_ctrl_is_enabled(uctx->enabled_caps))
2319 		mlx5_cmd_remove_privileged_uid(dev->mdev, context->devx_uid);
2320 
2321 out_devx:
2322 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
2323 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2324 
2325 out_ctx:
2326 	return err;
2327 }
2328 
2329 static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
2330 				  struct uverbs_attr_bundle *attrs)
2331 {
2332 	struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
2333 	int ret;
2334 
2335 	ret = set_ucontext_resp(ibcontext, &uctx_resp);
2336 	if (ret)
2337 		return ret;
2338 
2339 	uctx_resp.response_length =
2340 		min_t(size_t,
2341 		      uverbs_attr_get_len(attrs,
2342 				MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
2343 		      sizeof(uctx_resp));
2344 
2345 	ret = uverbs_copy_to_struct_or_zero(attrs,
2346 					MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
2347 					&uctx_resp,
2348 					sizeof(uctx_resp));
2349 	return ret;
2350 }
2351 
2352 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
2353 {
2354 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2355 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2356 	struct mlx5_bfreg_info *bfregi;
2357 
2358 	bfregi = &context->bfregi;
2359 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2360 
2361 	deallocate_uars(dev, context);
2362 	kfree(bfregi->sys_pages);
2363 	kfree(bfregi->count);
2364 
2365 	if (context->devx_uid) {
2366 		if (uctx_rdma_ctrl_is_enabled(ibcontext->enabled_caps))
2367 			mlx5_cmd_remove_privileged_uid(dev->mdev,
2368 						       context->devx_uid);
2369 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2370 	}
2371 }
2372 
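/*
 * Translate a UAR index to the PFN of the system page containing it;
 * with 4K UAR support several firmware UARs share one kernel page.
 */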
2373 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2374 				 int uar_idx)
2375 {
2376 	int fw_uars_per_page;
2377 
2378 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2379 
2380 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2381 }
2382 
2383 static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2384 				 int uar_idx)
2385 {
2386 	unsigned int fw_uars_per_page;
2387 
2388 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2389 				MLX5_UARS_IN_PAGE : 1;
2390 
2391 	return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2392 }
2393 
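/*
 * mmap offsets encode a command in the bits above MLX5_IB_MMAP_CMD_SHIFT of
 * vm_pgoff and an argument (e.g. a page index) in the bits below it.
 */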
2394 static int get_command(unsigned long offset)
2395 {
2396 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2397 }
2398 
2399 static int get_arg(unsigned long offset)
2400 {
2401 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2402 }
2403 
2404 static int get_index(unsigned long offset)
2405 {
2406 	return get_arg(offset);
2407 }
2408 
2409 /* The index resides in an extra byte to enable values larger than 255 */
2410 static int get_extended_index(unsigned long offset)
2411 {
2412 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2413 }
2414 
2416 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2417 {
2418 }
2419 
2420 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2421 {
2422 	switch (cmd) {
2423 	case MLX5_IB_MMAP_WC_PAGE:
2424 		return "WC";
2425 	case MLX5_IB_MMAP_REGULAR_PAGE:
2426 		return "best effort WC";
2427 	case MLX5_IB_MMAP_NC_PAGE:
2428 		return "NC";
2429 	case MLX5_IB_MMAP_DEVICE_MEM:
2430 		return "Device Memory";
2431 	default:
2432 		return "Unknown";
2433 	}
2434 }
2435 
2436 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2437 					struct vm_area_struct *vma,
2438 					struct mlx5_ib_ucontext *context)
2439 {
2440 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2441 	    !(vma->vm_flags & VM_SHARED))
2442 		return -EINVAL;
2443 
2444 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2445 		return -EOPNOTSUPP;
2446 
2447 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2448 		return -EPERM;
2449 	vm_flags_clear(vma, VM_MAYWRITE);
2450 
2451 	if (!dev->mdev->clock_info)
2452 		return -EOPNOTSUPP;
2453 
2454 	return vm_insert_page(vma, vma->vm_start,
2455 			      virt_to_page(dev->mdev->clock_info));
2456 }
2457 
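/*
 * Return the index of the PCI memory BAR of @pdev that contains physical
 * address @pa, or -1 if no BAR matches.
 */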
2458 static int phys_addr_to_bar(struct pci_dev *pdev, phys_addr_t pa)
2459 {
2460 	resource_size_t start, end;
2461 	int bar;
2462 
2463 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
2464 		/* Skip BARs not present or not memory-mapped */
2465 		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
2466 			continue;
2467 
2468 		start = pci_resource_start(pdev, bar);
2469 		end = pci_resource_end(pdev, bar);
2470 
2471 		if (!start || !end)
2472 			continue;
2473 
2474 		if (pa >= start && pa <= end)
2475 			return bar;
2476 	}
2477 
2478 	return -1;
2479 }
2480 
2481 static int mlx5_ib_mmap_get_pfns(struct rdma_user_mmap_entry *entry,
2482 				 struct phys_vec *phys_vec,
2483 				 struct p2pdma_provider **provider)
2484 {
2485 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2486 	struct pci_dev *pdev = to_mdev(entry->ucontext->device)->mdev->pdev;
2487 	int bar;
2488 
2489 	phys_vec->paddr = mentry->address;
2490 	phys_vec->len = entry->npages * PAGE_SIZE;
2491 
2492 	bar = phys_addr_to_bar(pdev, phys_vec->paddr);
2493 	if (bar < 0)
2494 		return -EINVAL;
2495 
2496 	*provider = pcim_p2pdma_provider(pdev, bar);
2497 	/* If the kernel was not compiled with CONFIG_PCI_P2PDMA the
2498 	 * functionality is not supported.
2499 	 */
2500 	if (!*provider)
2501 		return -EOPNOTSUPP;
2502 
2503 	return 0;
2504 }
2505 
2506 static struct rdma_user_mmap_entry *
2507 mlx5_ib_pgoff_to_mmap_entry(struct ib_ucontext *ucontext, off_t pg_off)
2508 {
2509 	unsigned long entry_pgoff;
2510 	unsigned long idx;
2511 	u8 command;
2512 
2513 	pg_off = pg_off >> PAGE_SHIFT;
2514 	command = get_command(pg_off);
2515 	idx = get_extended_index(pg_off);
2516 
2517 	entry_pgoff = command << 16 | idx;
2518 
2519 	return rdma_user_mmap_entry_get_pgoff(ucontext, entry_pgoff);
2520 }
2521 
2522 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
2523 {
2524 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2525 	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2526 	struct mlx5_var_table *var_table = &dev->var_table;
2527 	struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
2528 
2529 	switch (mentry->mmap_flag) {
2530 	case MLX5_IB_MMAP_TYPE_MEMIC:
2531 	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
2532 		mlx5_ib_dm_mmap_free(dev, mentry);
2533 		break;
2534 	case MLX5_IB_MMAP_TYPE_VAR:
2535 		mutex_lock(&var_table->bitmap_lock);
2536 		clear_bit(mentry->page_idx, var_table->bitmap);
2537 		mutex_unlock(&var_table->bitmap_lock);
2538 		kfree(mentry);
2539 		break;
2540 	case MLX5_IB_MMAP_TYPE_UAR_WC:
2541 	case MLX5_IB_MMAP_TYPE_UAR_NC:
2542 		mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2543 				     context->devx_uid);
2544 		kfree(mentry);
2545 		break;
2546 	default:
2547 		WARN_ON(true);
2548 	}
2549 }
2550 
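/*
 * mmap a single UAR page to userspace with the caching mode the command
 * implies (write-combining for blue-flame, non-cached for doorbells).
 * MLX5_IB_MMAP_ALLOC_WC additionally allocates a UAR from firmware and
 * records it in the context's dynamic bfreg table.
 */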
2551 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2552 		    struct vm_area_struct *vma,
2553 		    struct mlx5_ib_ucontext *context)
2554 {
2555 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2556 	int err;
2557 	unsigned long idx;
2558 	phys_addr_t pfn;
2559 	pgprot_t prot;
2560 	u32 bfreg_dyn_idx = 0;
2561 	u32 uar_index;
2562 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2563 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2564 				bfregi->num_static_sys_pages;
2565 
2566 	if (bfregi->lib_uar_dyn)
2567 		return -EINVAL;
2568 
2569 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2570 		return -EINVAL;
2571 
2572 	if (dyn_uar)
2573 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2574 	else
2575 		idx = get_index(vma->vm_pgoff);
2576 
2577 	if (idx >= max_valid_idx) {
2578 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2579 			     idx, max_valid_idx);
2580 		return -EINVAL;
2581 	}
2582 
2583 	switch (cmd) {
2584 	case MLX5_IB_MMAP_WC_PAGE:
2585 	case MLX5_IB_MMAP_ALLOC_WC:
2586 	case MLX5_IB_MMAP_REGULAR_PAGE:
2587 		/* For MLX5_IB_MMAP_REGULAR_PAGE make a best effort to get WC */
2588 		prot = pgprot_writecombine(vma->vm_page_prot);
2589 		break;
2590 	case MLX5_IB_MMAP_NC_PAGE:
2591 		prot = pgprot_noncached(vma->vm_page_prot);
2592 		break;
2593 	default:
2594 		return -EINVAL;
2595 	}
2596 
2597 	if (dyn_uar) {
2598 		int uars_per_page;
2599 
2600 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2601 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2602 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2603 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2604 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2605 			return -EINVAL;
2606 		}
2607 
2608 		mutex_lock(&bfregi->lock);
2609 		/* Fail if the UAR is already allocated; the first bfreg index
2610 		 * of each page holds its count.
2611 		 */
2612 		if (bfregi->count[bfreg_dyn_idx]) {
2613 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2614 			mutex_unlock(&bfregi->lock);
2615 			return -EINVAL;
2616 		}
2617 
2618 		bfregi->count[bfreg_dyn_idx]++;
2619 		mutex_unlock(&bfregi->lock);
2620 
2621 		err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2622 					 context->devx_uid);
2623 		if (err) {
2624 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2625 			goto free_bfreg;
2626 		}
2627 	} else {
2628 		uar_index = bfregi->sys_pages[idx];
2629 	}
2630 
2631 	pfn = uar_index2pfn(dev, uar_index);
2632 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2633 
2634 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2635 				prot, NULL);
2636 	if (err) {
2637 		mlx5_ib_err(dev,
2638 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2639 			    err, mmap_cmd2str(cmd));
2640 		goto err;
2641 	}
2642 
2643 	if (dyn_uar)
2644 		bfregi->sys_pages[idx] = uar_index;
2645 	return 0;
2646 
2647 err:
2648 	if (!dyn_uar)
2649 		return err;
2650 
2651 	mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2652 
2653 free_bfreg:
2654 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2655 
2656 	return err;
2657 }
2658 
2659 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
2660 {
2661 	unsigned long idx;
2662 	u8 command;
2663 
2664 	command = get_command(vma->vm_pgoff);
2665 	idx = get_extended_index(vma->vm_pgoff);
2666 
2667 	return (command << 16 | idx);
2668 }
2669 
2670 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2671 			       struct vm_area_struct *vma,
2672 			       struct ib_ucontext *ucontext)
2673 {
2674 	struct mlx5_user_mmap_entry *mentry;
2675 	struct rdma_user_mmap_entry *entry;
2676 	unsigned long pgoff;
2677 	pgprot_t prot;
2678 	phys_addr_t pfn;
2679 	int ret;
2680 
2681 	pgoff = mlx5_vma_to_pgoff(vma);
2682 	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
2683 	if (!entry)
2684 		return -EINVAL;
2685 
2686 	mentry = to_mmmap(entry);
2687 	pfn = (mentry->address >> PAGE_SHIFT);
2688 	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
2689 	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
2690 		prot = pgprot_noncached(vma->vm_page_prot);
2691 	else
2692 		prot = pgprot_writecombine(vma->vm_page_prot);
2693 	ret = rdma_user_mmap_io(ucontext, vma, pfn,
2694 				entry->npages * PAGE_SIZE,
2695 				prot,
2696 				entry);
2697 	rdma_user_mmap_entry_put(&mentry->rdma_entry);
2698 	return ret;
2699 }
2700 
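/*
 * Inverse of get_command()/get_extended_index(): fold an rdma mmap entry's
 * start_pgoff back into the command + extended-index offset encoding that
 * userspace passes to mmap().
 */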
2701 static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
2702 {
2703 	u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
2704 	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
2705 
2706 	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
2707 		(index & 0xFF)) << PAGE_SHIFT;
2708 }
2709 
2710 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2711 {
2712 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2713 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2714 	unsigned long command;
2715 	phys_addr_t pfn;
2716 
2717 	command = get_command(vma->vm_pgoff);
2718 	switch (command) {
2719 	case MLX5_IB_MMAP_WC_PAGE:
2720 	case MLX5_IB_MMAP_ALLOC_WC:
2721 		if (!mlx5_wc_support_get(dev->mdev))
2722 			return -EPERM;
2723 		fallthrough;
2724 	case MLX5_IB_MMAP_NC_PAGE:
2725 	case MLX5_IB_MMAP_REGULAR_PAGE:
2726 		return uar_mmap(dev, command, vma, context);
2727 
2728 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2729 		return -ENOSYS;
2730 
2731 	case MLX5_IB_MMAP_CORE_CLOCK:
2732 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2733 			return -EINVAL;
2734 
2735 		if (vma->vm_flags & VM_WRITE)
2736 			return -EPERM;
2737 		vm_flags_clear(vma, VM_MAYWRITE);
2738 
2739 		/* Don't expose to user-space information it shouldn't have */
2740 		if (PAGE_SIZE > 4096)
2741 			return -EOPNOTSUPP;
2742 
2743 		pfn = (dev->mdev->bar_addr +
2744 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2745 			PAGE_SHIFT;
2746 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2747 					 PAGE_SIZE,
2748 					 pgprot_noncached(vma->vm_page_prot),
2749 					 NULL);
2750 	case MLX5_IB_MMAP_CLOCK_INFO:
2751 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2752 
2753 	default:
2754 		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2755 	}
2756 
2757 	return 0;
2758 }
2759 
2760 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2761 {
2762 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2763 	struct ib_device *ibdev = ibpd->device;
2764 	struct mlx5_ib_alloc_pd_resp resp;
2765 	int err;
2766 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2767 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2768 	u16 uid = 0;
2769 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2770 		udata, struct mlx5_ib_ucontext, ibucontext);
2771 
2772 	uid = context ? context->devx_uid : 0;
2773 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2774 	MLX5_SET(alloc_pd_in, in, uid, uid);
2775 	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2776 	if (err)
2777 		return err;
2778 
2779 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2780 	pd->uid = uid;
2781 	if (udata) {
2782 		resp.pdn = pd->pdn;
2783 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2784 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2785 			return -EFAULT;
2786 		}
2787 	}
2788 
2789 	return 0;
2790 }
2791 
2792 static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2793 {
2794 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2795 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2796 
2797 	return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2798 }
2799 
2800 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2801 {
2802 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2803 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2804 	int err;
2805 	u16 uid;
2806 
2807 	uid = ibqp->pd ?
2808 		to_mpd(ibqp->pd)->uid : 0;
2809 
2810 	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
2811 		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
2812 		return -EOPNOTSUPP;
2813 	}
2814 
2815 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2816 	if (err)
2817 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2818 			     ibqp->qp_num, gid->raw);
2819 
2820 	return err;
2821 }
2822 
2823 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2824 {
2825 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2826 	int err;
2827 	u16 uid;
2828 
2829 	uid = ibqp->pd ?
2830 		to_mpd(ibqp->pd)->uid : 0;
2831 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2832 	if (err)
2833 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2834 			     ibqp->qp_num, gid->raw);
2835 
2836 	return err;
2837 }
2838 
2839 static int init_node_data(struct mlx5_ib_dev *dev)
2840 {
2841 	int err;
2842 
2843 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2844 	if (err)
2845 		return err;
2846 
2847 	dev->mdev->rev_id = dev->mdev->pdev->revision;
2848 
2849 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2850 }
2851 
2852 static ssize_t fw_pages_show(struct device *device,
2853 			     struct device_attribute *attr, char *buf)
2854 {
2855 	struct mlx5_ib_dev *dev =
2856 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2857 
2858 	return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2859 }
2860 static DEVICE_ATTR_RO(fw_pages);
2861 
2862 static ssize_t reg_pages_show(struct device *device,
2863 			      struct device_attribute *attr, char *buf)
2864 {
2865 	struct mlx5_ib_dev *dev =
2866 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2867 
2868 	return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2869 }
2870 static DEVICE_ATTR_RO(reg_pages);
2871 
2872 static ssize_t hca_type_show(struct device *device,
2873 			     struct device_attribute *attr, char *buf)
2874 {
2875 	struct mlx5_ib_dev *dev =
2876 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2877 
2878 	return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2879 }
2880 static DEVICE_ATTR_RO(hca_type);
2881 
2882 static ssize_t hw_rev_show(struct device *device,
2883 			   struct device_attribute *attr, char *buf)
2884 {
2885 	struct mlx5_ib_dev *dev =
2886 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2887 
2888 	return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2889 }
2890 static DEVICE_ATTR_RO(hw_rev);
2891 
2892 static ssize_t board_id_show(struct device *device,
2893 			     struct device_attribute *attr, char *buf)
2894 {
2895 	struct mlx5_ib_dev *dev =
2896 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2897 
2898 	return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2899 			  dev->mdev->board_id);
2900 }
2901 static DEVICE_ATTR_RO(board_id);
2902 
2903 static struct attribute *mlx5_class_attributes[] = {
2904 	&dev_attr_hw_rev.attr,
2905 	&dev_attr_hca_type.attr,
2906 	&dev_attr_board_id.attr,
2907 	&dev_attr_fw_pages.attr,
2908 	&dev_attr_reg_pages.attr,
2909 	NULL,
2910 };
2911 
2912 static const struct attribute_group mlx5_attr_group = {
2913 	.attrs = mlx5_class_attributes,
2914 };
2915 
2916 static void pkey_change_handler(struct work_struct *work)
2917 {
2918 	struct mlx5_ib_port_resources *ports =
2919 		container_of(work, struct mlx5_ib_port_resources,
2920 			     pkey_change_work);
2921 
2922 	if (!ports->gsi)
2923 		/*
2924 		 * We got this event before the device was fully configured
2925 		 * and the MAD registration code wasn't called/finished yet.
2926 		 */
2927 		return;
2928 
2929 	mlx5_ib_gsi_pkey_change(ports->gsi);
2930 }
2931 
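/*
 * On a fatal device error, walk all QPs on the device, collect the CQs that
 * still have work outstanding and invoke their completion handlers, so
 * consumers observe flushed completions instead of blocking forever.
 */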
2932 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2933 {
2934 	struct mlx5_ib_qp *mqp;
2935 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2936 	struct mlx5_core_cq *mcq;
2937 	struct list_head cq_armed_list;
2938 	unsigned long flags_qp;
2939 	unsigned long flags_cq;
2940 	unsigned long flags;
2941 
2942 	INIT_LIST_HEAD(&cq_armed_list);
2943 
2944 	/* Go over the QP list residing on that ibdev, synced with create/destroy QP. */
2945 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2946 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2947 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2948 		if (mqp->sq.tail != mqp->sq.head) {
2949 			send_mcq = to_mcq(mqp->ibqp.send_cq);
2950 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2951 			if (send_mcq->mcq.comp &&
2952 			    mqp->ibqp.send_cq->comp_handler) {
2953 				if (!send_mcq->mcq.reset_notify_added) {
2954 					send_mcq->mcq.reset_notify_added = 1;
2955 					list_add_tail(&send_mcq->mcq.reset_notify,
2956 						      &cq_armed_list);
2957 				}
2958 			}
2959 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2960 		}
2961 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2962 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2963 		/* no handling is needed for SRQ */
2964 		if (!mqp->ibqp.srq) {
2965 			if (mqp->rq.tail != mqp->rq.head) {
2966 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2967 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2968 				if (recv_mcq->mcq.comp &&
2969 				    mqp->ibqp.recv_cq->comp_handler) {
2970 					if (!recv_mcq->mcq.reset_notify_added) {
2971 						recv_mcq->mcq.reset_notify_added = 1;
2972 						list_add_tail(&recv_mcq->mcq.reset_notify,
2973 							      &cq_armed_list);
2974 					}
2975 				}
2976 				spin_unlock_irqrestore(&recv_mcq->lock,
2977 						       flags_cq);
2978 			}
2979 		}
2980 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2981 	}
2982 	/* At this point, all inflight post-send operations were put to be executed
2983 	 * as we locked/unlocked the above locks. Now we need to arm all involved CQs.
2984 	 */
2985 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2986 		mcq->comp(mcq, NULL);
2987 	}
2988 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2989 }
2990 
2991 static void delay_drop_handler(struct work_struct *work)
2992 {
2993 	int err;
2994 	struct mlx5_ib_delay_drop *delay_drop =
2995 		container_of(work, struct mlx5_ib_delay_drop,
2996 			     delay_drop_work);
2997 
2998 	atomic_inc(&delay_drop->events_cnt);
2999 
3000 	mutex_lock(&delay_drop->lock);
3001 	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
3002 	if (err) {
3003 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
3004 			     delay_drop->timeout);
3005 		delay_drop->activate = false;
3006 	}
3007 	mutex_unlock(&delay_drop->lock);
3008 }
3009 
3010 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
3011 				 struct ib_event *ibev)
3012 {
3013 	u32 port = (eqe->data.port.port >> 4) & 0xf;
3014 
3015 	switch (eqe->sub_type) {
3016 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
3017 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3018 					    IB_LINK_LAYER_ETHERNET)
3019 			schedule_work(&ibdev->delay_drop.delay_drop_work);
3020 		break;
3021 	default: /* do nothing */
3022 		return;
3023 	}
3024 }
3025 
3026 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
3027 			      struct ib_event *ibev)
3028 {
3029 	u32 port = (eqe->data.port.port >> 4) & 0xf;
3030 
3031 	ibev->element.port_num = port;
3032 
3033 	switch (eqe->sub_type) {
3034 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
3035 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
3036 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
3037 		if (ibdev->ib_active) {
3038 			struct ib_event speed_event = {};
3039 
3040 			speed_event.device = &ibdev->ib_dev;
3041 			speed_event.event = IB_EVENT_DEVICE_SPEED_CHANGE;
3042 			ib_dispatch_event(&speed_event);
3043 		}
3044 
3045 		/* In RoCE, port up/down events are handled in
3046 		 * mlx5_netdev_event().
3047 		 */
3048 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3049 					    IB_LINK_LAYER_ETHERNET)
3050 			return -EINVAL;
3051 
3052 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
3053 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3054 		break;
3055 
3056 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
3057 		ibev->event = IB_EVENT_LID_CHANGE;
3058 		break;
3059 
3060 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
3061 		ibev->event = IB_EVENT_PKEY_CHANGE;
3062 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3063 		break;
3064 
3065 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
3066 		ibev->event = IB_EVENT_GID_CHANGE;
3067 		break;
3068 
3069 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
3070 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
3071 		break;
3072 	default:
3073 		return -EINVAL;
3074 	}
3075 
3076 	return 0;
3077 }
3078 
3079 static void mlx5_ib_handle_event(struct work_struct *_work)
3080 {
3081 	struct mlx5_ib_event_work *work =
3082 		container_of(_work, struct mlx5_ib_event_work, work);
3083 	struct mlx5_ib_dev *ibdev;
3084 	struct ib_event ibev;
3085 
3086 	if (work->is_slave) {
3087 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
3088 		if (!ibdev)
3089 			goto out;
3090 	} else {
3091 		ibdev = work->dev;
3092 	}
3093 
3094 	switch (work->event) {
3095 	case MLX5_EVENT_TYPE_PORT_CHANGE:
3096 		if (handle_port_change(ibdev, work->param, &ibev))
3097 			goto out;
3098 		break;
3099 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
3100 		handle_general_event(ibdev, work->param, &ibev);
3101 		fallthrough;
3102 	default:
3103 		goto out;
3104 	}
3105 
3106 	ibev.device = &ibdev->ib_dev;
3107 
3108 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
3109 		mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
3110 		goto out;
3111 	}
3112 
3113 	if (ibdev->ib_active)
3114 		ib_dispatch_event(&ibev);
3115 
3116 out:
3117 	kfree(work);
3118 }
3119 
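/*
 * Notifier callback for core device events. Handling is deferred to a
 * workqueue because notifiers can run in atomic context.
 */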
3120 static int mlx5_ib_event(struct notifier_block *nb,
3121 			 unsigned long event, void *param)
3122 {
3123 	struct mlx5_ib_event_work *work;
3124 
3125 	work = kmalloc_obj(*work, GFP_ATOMIC);
3126 	if (!work)
3127 		return NOTIFY_DONE;
3128 
3129 	INIT_WORK(&work->work, mlx5_ib_handle_event);
3130 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
3131 	work->is_slave = false;
3132 	work->param = param;
3133 	work->event = event;
3134 
3135 	queue_work(mlx5_ib_event_wq, &work->work);
3136 
3137 	return NOTIFY_OK;
3138 }
3139 
3140 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
3141 				    unsigned long event, void *param)
3142 {
3143 	struct mlx5_ib_event_work *work;
3144 
3145 	work = kmalloc_obj(*work, GFP_ATOMIC);
3146 	if (!work)
3147 		return NOTIFY_DONE;
3148 
3149 	INIT_WORK(&work->work, mlx5_ib_handle_event);
3150 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
3151 	work->is_slave = true;
3152 	work->param = param;
3153 	work->event = event;
3154 	queue_work(mlx5_ib_event_wq, &work->work);
3155 
3156 	return NOTIFY_OK;
3157 }
3158 
3159 static void mlx5_ib_handle_sys_error_event(struct work_struct *_work)
3160 {
3161 	struct mlx5_ib_event_work *work =
3162 		container_of(_work, struct mlx5_ib_event_work, work);
3163 	struct mlx5_ib_dev *ibdev = work->dev;
3164 	struct ib_event ibev;
3165 
3166 	ibev.event = IB_EVENT_DEVICE_FATAL;
3167 	mlx5_ib_handle_internal_error(ibdev);
3168 	ibev.element.port_num = (u8)(unsigned long)work->param;
3169 	ibev.device = &ibdev->ib_dev;
3170 
3171 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
3172 		mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
3173 		goto out;
3174 	}
3175 
3176 	if (ibdev->ib_active)
3177 		ib_dispatch_event(&ibev);
3178 
3179 	ibdev->ib_active = false;
3180 out:
3181 	kfree(work);
3182 }
3183 
3184 static int mlx5_ib_sys_error_event(struct notifier_block *nb,
3185 				   unsigned long event, void *param)
3186 {
3187 	struct mlx5_ib_event_work *work;
3188 
3189 	if (event != MLX5_DEV_EVENT_SYS_ERROR)
3190 		return NOTIFY_DONE;
3191 
3192 	work = kmalloc_obj(*work, GFP_ATOMIC);
3193 	if (!work)
3194 		return NOTIFY_DONE;
3195 
3196 	INIT_WORK(&work->work, mlx5_ib_handle_sys_error_event);
3197 	work->dev = container_of(nb, struct mlx5_ib_dev, sys_error_events);
3198 	work->is_slave = false;
3199 	work->param = param;
3200 	work->event = event;
3201 
3202 	queue_work(mlx5_ib_event_wq, &work->work);
3203 
3204 	return NOTIFY_OK;
3205 }
3206 
3207 static int mlx5_ib_stage_sys_error_notifier_init(struct mlx5_ib_dev *dev)
3208 {
3209 	dev->sys_error_events.notifier_call = mlx5_ib_sys_error_event;
3210 	mlx5_notifier_register(dev->mdev, &dev->sys_error_events);
3211 	return 0;
3212 }
3213 
3214 static void mlx5_ib_stage_sys_error_notifier_cleanup(struct mlx5_ib_dev *dev)
3215 {
3216 	mlx5_notifier_unregister(dev->mdev, &dev->sys_error_events);
3217 }
3218 
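/*
 * Read the number of planes of a multi-plane IB port from the vport
 * context; reports 0 when ib_virt or multiplane is not supported.
 */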
3219 static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
3220 {
3221 	struct mlx5_hca_vport_context vport_ctx;
3222 	int err;
3223 
3224 	*num_plane = 0;
3225 	if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
3226 		return 0;
3227 
3228 	err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
3229 	if (err)
3230 		return err;
3231 
3232 	*num_plane = vport_ctx.num_plane;
3233 	return 0;
3234 }
3235 
3236 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
3237 {
3238 	struct mlx5_hca_vport_context vport_ctx;
3239 	int err;
3240 	int port;
3241 
3242 	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
3243 		return 0;
3244 
3245 	for (port = 1; port <= dev->num_ports; port++) {
3246 		if (dev->num_plane) {
3247 			dev->port_caps[port - 1].has_smi = false;
3248 			continue;
3249 		} else if (!MLX5_CAP_GEN(dev->mdev, ib_virt) ||
3250 			dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
3251 			dev->port_caps[port - 1].has_smi = true;
3252 			continue;
3253 		}
3254 
3255 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
3256 						   &vport_ctx);
3257 		if (err) {
3258 			mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
3259 				    port, err);
3260 			return err;
3261 		}
3262 		dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
3263 	}
3264 
3265 	return 0;
3266 }
3267 
3268 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
3269 {
3270 	unsigned int port;
3271 
3272 	rdma_for_each_port (&dev->ib_dev, port)
3273 		mlx5_query_ext_port_caps(dev, port);
3274 }
3275 
3276 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
3277 {
3278 	switch (umr_fence_cap) {
3279 	case MLX5_CAP_UMR_FENCE_NONE:
3280 		return MLX5_FENCE_MODE_NONE;
3281 	case MLX5_CAP_UMR_FENCE_SMALL:
3282 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
3283 	default:
3284 		return MLX5_FENCE_MODE_STRONG_ORDERING;
3285 	}
3286 }
3287 
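/*
 * Lazily create the shared device resources p0 (PD) and c0 (CQ) on first
 * use; the double-checked devr->c0 test keeps later calls lock-free.
 */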
3288 int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
3289 {
3290 	struct mlx5_ib_resources *devr = &dev->devr;
3291 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
3292 	struct ib_device *ibdev;
3293 	struct ib_pd *pd;
3294 	struct ib_cq *cq;
3295 	int ret = 0;
3296 
3298 	/*
3299 	 * devr->c0 is set once, never changed until device unload.
3300 	 * Avoid taking the mutex if initialization is already done.
3301 	 */
3302 	if (devr->c0)
3303 		return 0;
3304 
3305 	mutex_lock(&devr->cq_lock);
3306 	if (devr->c0)
3307 		goto unlock;
3308 
3309 	ibdev = &dev->ib_dev;
3310 	pd = ib_alloc_pd(ibdev, 0);
3311 	if (IS_ERR(pd)) {
3312 		ret = PTR_ERR(pd);
3313 		mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
3314 			    pd);
3315 		goto unlock;
3316 	}
3317 
3318 	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
3319 	if (IS_ERR(cq)) {
3320 		ret = PTR_ERR(cq);
3321 		mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
3322 			    cq);
3323 		ib_dealloc_pd(pd);
3324 		goto unlock;
3325 	}
3326 
3327 	devr->p0 = pd;
3328 	devr->c0 = cq;
3329 
3330 unlock:
3331 	mutex_unlock(&devr->cq_lock);
3332 	return ret;
3333 }
3334 
3335 int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
3336 {
3337 	struct mlx5_ib_resources *devr = &dev->devr;
3338 	struct ib_srq_init_attr attr;
3339 	struct ib_srq *s0, *s1;
3340 	int ret = 0;
3341 
3342 	/*
3343 	 * devr->s1 is set once, never changed until device unload.
3344 	 * Avoid taking the mutex if initialization is already done.
3345 	 */
3346 	if (devr->s1)
3347 		return 0;
3348 
3349 	mutex_lock(&devr->srq_lock);
3350 	if (devr->s1)
3351 		goto unlock;
3352 
3353 	ret = mlx5_ib_dev_res_cq_init(dev);
3354 	if (ret)
3355 		goto unlock;
3356 
3357 	memset(&attr, 0, sizeof(attr));
3358 	attr.attr.max_sge = 1;
3359 	attr.attr.max_wr = 1;
3360 	attr.srq_type = IB_SRQT_XRC;
3361 	attr.ext.cq = devr->c0;
3362 
3363 	s0 = ib_create_srq(devr->p0, &attr);
3364 	if (IS_ERR(s0)) {
3365 		ret = PTR_ERR(s0);
3366 		mlx5_ib_err(dev,
3367 			    "Couldn't create SRQ 0 for res init, err=%pe\n",
3368 			    s0);
3369 		goto unlock;
3370 	}
3371 
3372 	memset(&attr, 0, sizeof(attr));
3373 	attr.attr.max_sge = 1;
3374 	attr.attr.max_wr = 1;
3375 	attr.srq_type = IB_SRQT_BASIC;
3376 
3377 	s1 = ib_create_srq(devr->p0, &attr);
3378 	if (IS_ERR(s1)) {
3379 		ret = PTR_ERR(s1);
3380 		mlx5_ib_err(dev,
3381 			    "Couldn't create SRQ 1 for res init, err=%pe\n",
3382 			    s1);
3383 		ib_destroy_srq(s0);
3384 	}
3385 
3386 	devr->s0 = s0;
3387 	devr->s1 = s1;
3388 
3389 unlock:
3390 	mutex_unlock(&devr->srq_lock);
3391 	return ret;
3392 }
3393 
3394 static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
3395 {
3396 	struct mlx5_ib_resources *devr = &dev->devr;
3397 	int ret;
3398 
3399 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
3400 		return -EOPNOTSUPP;
3401 
3402 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
3403 	if (ret)
3404 		return ret;
3405 
3406 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
3407 	if (ret) {
3408 		mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
3409 		return ret;
3410 	}
3411 
3412 	mutex_init(&devr->cq_lock);
3413 	mutex_init(&devr->srq_lock);
3414 
3415 	return 0;
3416 }
3417 
3418 static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
3419 {
3420 	struct mlx5_ib_resources *devr = &dev->devr;
3421 
3422 	/* After s0/s1 init, they are not unset during the device lifetime. */
3423 	if (devr->s1) {
3424 		ib_destroy_srq(devr->s1);
3425 		ib_destroy_srq(devr->s0);
3426 	}
3427 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
3428 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
3429 	/* After p0/c0 init, they are not unset during the device lifetime. */
3430 	if (devr->c0) {
3431 		ib_destroy_cq(devr->c0);
3432 		ib_dealloc_pd(devr->p0);
3433 	}
3434 	mutex_destroy(&devr->cq_lock);
3435 	mutex_destroy(&devr->srq_lock);
3436 }
3437 
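/*
 * Create the PD and the whole-address-space PA mkeys used for data-direct
 * DMA; a second mkey with relaxed ordering is created on a best-effort
 * basis when the HCA supports it.
 */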
3438 static int
3439 mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
3440 {
3441 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3442 	struct mlx5_core_dev *mdev = dev->mdev;
3443 	bool ro_supp = false;
3444 	void *mkc;
3445 	u32 mkey;
3446 	u32 pdn;
3447 	u32 *in;
3448 	int err;
3449 
3450 	err = mlx5_core_alloc_pd(mdev, &pdn);
3451 	if (err)
3452 		return err;
3453 
3454 	in = kvzalloc(inlen, GFP_KERNEL);
3455 	if (!in) {
3456 		err = -ENOMEM;
3457 		goto err;
3458 	}
3459 
3460 	MLX5_SET(create_mkey_in, in, data_direct, 1);
3461 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3462 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
3463 	MLX5_SET(mkc, mkc, lw, 1);
3464 	MLX5_SET(mkc, mkc, lr, 1);
3465 	MLX5_SET(mkc, mkc, rw, 1);
3466 	MLX5_SET(mkc, mkc, rr, 1);
3467 	MLX5_SET(mkc, mkc, a, 1);
3468 	MLX5_SET(mkc, mkc, pd, pdn);
3469 	MLX5_SET(mkc, mkc, length64, 1);
3470 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3471 	err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
3472 	if (err)
3473 		goto err_mkey;
3474 
3475 	dev->ddr.mkey = mkey;
3476 	dev->ddr.pdn = pdn;
3477 
3478 	/* create another mkey with RO support */
3479 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
3480 		MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
3481 		ro_supp = true;
3482 	}
3483 
3484 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
3485 		MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
3486 		ro_supp = true;
3487 	}
3488 
3489 	if (ro_supp) {
3490 		err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
3491 		/* RO is defined as best effort */
3492 		if (!err) {
3493 			dev->ddr.mkey_ro = mkey;
3494 			dev->ddr.mkey_ro_valid = true;
3495 		}
3496 	}
3497 
3498 	kvfree(in);
3499 	return 0;
3500 
3501 err_mkey:
3502 	kvfree(in);
3503 err:
3504 	mlx5_core_dealloc_pd(mdev, pdn);
3505 	return err;
3506 }
3507 
3508 static void
3509 mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
3510 {
3512 	if (dev->ddr.mkey_ro_valid)
3513 		mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
3514 
3515 	mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
3516 	mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
3517 }
3518 
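/*
 * Derive the rdma-core protocol capability flags of a port from its link
 * layer and the RoCE L3/version caps; multi-plane and SMI devices return
 * fixed IB flag sets.
 */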
3519 static u32 get_core_cap_flags(struct ib_device *ibdev,
3520 			      struct mlx5_hca_vport_context *rep)
3521 {
3522 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3523 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
3524 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
3525 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
3526 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
3527 	u32 ret = 0;
3528 
3529 	if (rep->grh_required)
3530 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
3531 
3532 	if (dev->num_plane)
3533 		return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD |
3534 			RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA |
3535 			RDMA_CORE_CAP_AF_IB;
3536 	else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
3537 		return ret | RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI;
3538 
3539 	if (ll == IB_LINK_LAYER_INFINIBAND)
3540 		return ret | RDMA_CORE_PORT_IBA_IB;
3541 
3542 	if (raw_support)
3543 		ret |= RDMA_CORE_PORT_RAW_PACKET;
3544 
3545 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
3546 		return ret;
3547 
3548 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
3549 		return ret;
3550 
3551 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
3552 		ret |= RDMA_CORE_PORT_IBA_ROCE;
3553 
3554 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
3555 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3556 
3557 	return ret;
3558 }
3559 
3560 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
3561 			       struct ib_port_immutable *immutable)
3562 {
3563 	struct ib_port_attr attr;
3564 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3565 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
3566 	struct mlx5_hca_vport_context rep = {0};
3567 	int err;
3568 
3569 	err = ib_query_port(ibdev, port_num, &attr);
3570 	if (err)
3571 		return err;
3572 
3573 	if (ll == IB_LINK_LAYER_INFINIBAND) {
3574 		if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
3575 			port_num = smi_to_native_portnum(dev, port_num);
3576 
3577 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
3578 						   &rep);
3579 		if (err)
3580 			return err;
3581 	}
3582 
3583 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3584 	immutable->gid_tbl_len = attr.gid_tbl_len;
3585 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
3586 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3587 
3588 	return 0;
3589 }
3590 
3591 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
3592 				   struct ib_port_immutable *immutable)
3593 {
3594 	struct ib_port_attr attr;
3595 	int err;
3596 
3597 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
3598 
3599 	err = ib_query_port(ibdev, port_num, &attr);
3600 	if (err)
3601 		return err;
3602 
3603 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3604 	immutable->gid_tbl_len = attr.gid_tbl_len;
3605 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
3606 
3607 	return 0;
3608 }
3609 
3610 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3611 {
3612 	struct mlx5_ib_dev *dev =
3613 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3614 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3615 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3616 		 fw_rev_sub(dev->mdev));
3617 }
3618 
3619 static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
3620 {
3621 	struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
3622 					       lag_events);
3623 	struct mlx5_core_dev *mdev = dev->mdev;
3624 	struct ib_device *ibdev = &dev->ib_dev;
3625 	struct net_device *old_ndev = NULL;
3626 	struct mlx5_ib_port *port;
3627 	struct net_device *ndev;
3628 	u32 portnum = 0;
3629 	int ret = 0;
3630 	int i;
3631 
3632 	switch (event) {
3633 	case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
3634 		ndev = data;
3635 		if (ndev) {
3636 			if (!mlx5_lag_is_roce(mdev)) {
3637 				/* SR-IOV LAG */
3638 				for (i = 0; i < dev->num_ports; i++) {
3639 					port = &dev->port[i];
3640 					if (port->rep && port->rep->vport ==
3641 					    MLX5_VPORT_UPLINK) {
3642 						portnum = i;
3643 						break;
3644 					}
3645 				}
3646 			}
3647 			old_ndev = ib_device_get_netdev(ibdev, portnum + 1);
3648 			ret = ib_device_set_netdev(ibdev, ndev, portnum + 1);
3649 			if (ret)
3650 				goto out;
3651 
3652 			if (old_ndev)
3653 				roce_del_all_netdev_gids(ibdev, portnum + 1,
3654 							 old_ndev);
3655 			rdma_roce_rescan_port(ibdev, portnum + 1);
3656 		}
3657 		break;
3658 	default:
3659 		return NOTIFY_DONE;
3660 	}
3661 
3662 out:
3663 	dev_put(old_ndev);
3664 	return notifier_from_errno(ret);
3665 }
3666 
3667 static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
3668 {
3669 	dev->lag_events.notifier_call = lag_event;
3670 	blocking_notifier_chain_register(&dev->mdev->priv.lag_nh,
3671 					 &dev->lag_events);
3672 }
3673 
3674 static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev)
3675 {
3676 	blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh,
3677 					   &dev->lag_events);
3678 }
3679 
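/*
 * RoCE LAG setup: create the vport LAG object and a demux flow table
 * sized to the number of ports, so traffic arriving on the bond can be
 * steered back to the right vport, then start listening for LAG events.
 */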
3680 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
3681 {
3682 	struct mlx5_flow_table_attr ft_attr = {};
3683 	struct mlx5_core_dev *mdev = dev->mdev;
3684 	struct mlx5_flow_namespace *ns;
3685 	int err;
3686 
3687 	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_LAG);
3688 	if (!ns || !mlx5_lag_is_active(mdev))
3689 		return 0;
3690 
3691 	err = mlx5_cmd_create_vport_lag(mdev);
3692 	if (err)
3693 		return err;
3694 
3695 	ft_attr.level = 0;
3696 	ft_attr.prio = 0;
3697 	ft_attr.max_fte = dev->num_ports;
3698 
3699 	err = mlx5_lag_demux_init(mdev, &ft_attr);
3700 	if (err)
3701 		goto err_destroy_vport_lag;
3702 
3703 	mlx5e_lag_event_register(dev);
3704 	dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3705 	dev->lag_active = true;
3706 	return 0;
3707 
3708 err_destroy_vport_lag:
3709 	mlx5_cmd_destroy_vport_lag(mdev);
3710 	return err;
3711 }
3712 
3713 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3714 {
3715 	struct mlx5_core_dev *mdev = dev->mdev;
3716 
3717 	if (dev->lag_active) {
3718 		dev->lag_active = false;
3719 
3720 		mlx5e_lag_event_unregister(dev);
3721 		mlx5_lag_demux_cleanup(mdev);
3722 
3723 		mlx5_cmd_destroy_vport_lag(mdev);
3724 	}
3725 }
3726 
3727 static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
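/*
 * Netdev tracking: the driver-level notifier below tells us which uplink
 * netdev to follow; a per-netns netdevice notifier is then (un)registered
 * on that device.  tracking_netdev guards against double registration.
 */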
3728 					  struct net_device *netdev)
3729 {
3730 	int err;
3731 
3732 	if (roce->tracking_netdev)
3733 		return;
3734 	roce->tracking_netdev = netdev;
3735 	roce->nb.notifier_call = mlx5_netdev_event;
3736 	err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
3737 	WARN_ON(err);
3738 }
3739 
3740 static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
3741 {
3742 	if (!roce->tracking_netdev)
3743 		return;
3744 	unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
3745 					      &roce->nn);
3746 	roce->tracking_netdev = NULL;
3747 }
3748 
3749 static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
3750 				     unsigned long event, void *data)
3751 {
3752 	struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
3753 	struct net_device *netdev = data;
3754 
3755 	switch (event) {
3756 	case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
3757 		if (netdev)
3758 			mlx5_netdev_notifier_register(roce, netdev);
3759 		else
3760 			mlx5_netdev_notifier_unregister(roce);
3761 		break;
3762 	default:
3763 		return NOTIFY_DONE;
3764 	}
3765 
3766 	return NOTIFY_OK;
3767 }
3768 
3769 static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
3770 {
3771 	struct mlx5_roce *roce = &dev->port[port_num].roce;
3772 
3773 	roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
3774 	mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3775 	mlx5_core_uplink_netdev_event_replay(dev->mdev);
3776 }
3777 
3778 static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
3779 {
3780 	struct mlx5_roce *roce = &dev->port[port_num].roce;
3781 
3782 	mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3783 	mlx5_netdev_notifier_unregister(roce);
3784 }
3785 
3786 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3787 {
3788 	int err;
3789 
3790 	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3791 		err = mlx5_nic_vport_enable_roce(dev->mdev);
3792 		if (err)
3793 			return err;
3794 	}
3795 
3796 	err = mlx5_eth_lag_init(dev);
3797 	if (err)
3798 		goto err_disable_roce;
3799 
3800 	return 0;
3801 
3802 err_disable_roce:
3803 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3804 		mlx5_nic_vport_disable_roce(dev->mdev);
3805 
3806 	return err;
3807 }
3808 
3809 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3810 {
3811 	mlx5_eth_lag_cleanup(dev);
3812 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3813 		mlx5_nic_vport_disable_roce(dev->mdev);
3814 }
3815 
3816 static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3817 				 enum rdma_netdev_t type,
3818 				 struct rdma_netdev_alloc_params *params)
3819 {
3820 	if (type != RDMA_NETDEV_IPOIB)
3821 		return -EOPNOTSUPP;
3822 
3823 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3824 }
3825 
3826 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3827 				       size_t count, loff_t *pos)
3828 {
3829 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3830 	char lbuf[20];
3831 	int len;
3832 
3833 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3834 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
3835 }
3836 
3837 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3838 					size_t count, loff_t *pos)
3839 {
3840 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3841 	u32 timeout;
3842 	u32 var;
3843 
3844 	if (kstrtouint_from_user(buf, count, 0, &var))
3845 		return -EFAULT;
3846 
3847 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3848 			1000);
3849 	if (timeout != var)
3850 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3851 			    timeout);
3852 
3853 	delay_drop->timeout = timeout;
3854 
3855 	return count;
3856 }
3857 
3858 static const struct file_operations fops_delay_drop_timeout = {
3859 	.owner	= THIS_MODULE,
3860 	.open	= simple_open,
3861 	.write	= delay_drop_timeout_write,
3862 	.read	= delay_drop_timeout_read,
3863 };
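
/*
 * Illustrative debugfs usage (the path layout below is an assumption and
 * depends on where debugfs is mounted and on the core device's debugfs
 * root):
 *
 *   # read the current timeout, in usec
 *   cat /sys/kernel/debug/mlx5/<dev>/delay_drop/timeout
 *   # request 250 usec; the value is rounded up to a multiple of
 *   # 100 usec (250 becomes 300) and capped at
 *   # MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000
 *   echo 250 > /sys/kernel/debug/mlx5/<dev>/delay_drop/timeout
 */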
3864 
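/*
 * Unbind a slave port from its multiport master.  mpi->ibdev is cleared
 * under mpi_lock first so new users cannot grab the mdev; any existing
 * holders (mdev_refcnt) are then waited out via unref_comp before the
 * vport is unaffiliated.  Called with mlx5_ib_multiport_mutex held.
 */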
3865 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3866 				      struct mlx5_ib_multiport_info *mpi)
3867 {
3868 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3869 	struct mlx5_ib_port *port = &ibdev->port[port_num];
3870 	int comps;
3871 	int err;
3872 	int i;
3873 
3874 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3875 
3876 	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
3877 
3878 	mlx5_core_mp_event_replay(ibdev->mdev,
3879 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3880 				  NULL);
3881 	mlx5_core_mp_event_replay(mpi->mdev,
3882 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3883 				  NULL);
3884 
3885 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3886 
3887 	spin_lock(&port->mp.mpi_lock);
3888 	if (!mpi->ibdev) {
3889 		spin_unlock(&port->mp.mpi_lock);
3890 		return;
3891 	}
3892 
3893 	mpi->ibdev = NULL;
3894 
3895 	spin_unlock(&port->mp.mpi_lock);
3896 	if (mpi->mdev_events.notifier_call)
3897 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3898 	mpi->mdev_events.notifier_call = NULL;
3899 	mlx5_mdev_netdev_untrack(ibdev, port_num);
3900 	spin_lock(&port->mp.mpi_lock);
3901 
3902 	comps = mpi->mdev_refcnt;
3903 	if (comps) {
3904 		mpi->unaffiliate = true;
3905 		init_completion(&mpi->unref_comp);
3906 		spin_unlock(&port->mp.mpi_lock);
3907 
3908 		for (i = 0; i < comps; i++)
3909 			wait_for_completion(&mpi->unref_comp);
3910 
3911 		spin_lock(&port->mp.mpi_lock);
3912 		mpi->unaffiliate = false;
3913 	}
3914 
3915 	port->mp.mpi = NULL;
3916 
3917 	spin_unlock(&port->mp.mpi_lock);
3918 
3919 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3920 
3921 	mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3922 	/* Log an error; we still need to clean up the pointers and add
3923 	 * the port back to the unaffiliated list.
3924 	 */
3925 	if (err)
3926 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3927 			    port_num + 1);
3928 
3929 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3930 }
3931 
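/*
 * Bind an unaffiliated slave port to this device.  On any failure after
 * the mpi has been published, the regular unbind path runs, so the error
 * handling is shared with hot-unplug.  Called with
 * mlx5_ib_multiport_mutex held.
 */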
3932 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3933 				    struct mlx5_ib_multiport_info *mpi)
3934 {
3935 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3936 	u64 key;
3937 	int err;
3938 
3939 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3940 
3941 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3942 	if (ibdev->port[port_num].mp.mpi) {
3943 		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3944 			    port_num + 1);
3945 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3946 		return false;
3947 	}
3948 
3949 	ibdev->port[port_num].mp.mpi = mpi;
3950 	mpi->ibdev = ibdev;
3951 	mpi->mdev_events.notifier_call = NULL;
3952 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3953 
3954 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3955 	if (err)
3956 		goto unbind;
3957 
3958 	mlx5_mdev_netdev_track(ibdev, port_num);
3959 
3960 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
3961 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3962 
3963 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
3964 
3965 	key = mpi->mdev->priv.adev_idx;
3966 	mlx5_core_mp_event_replay(mpi->mdev,
3967 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3968 				  &key);
3969 	mlx5_core_mp_event_replay(ibdev->mdev,
3970 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3971 				  &key);
3972 
3973 	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
3974 	if (err)
3975 		goto unbind;
3976 
3977 	return true;
3978 
3979 unbind:
3980 	mlx5_ib_unbind_slave_port(ibdev, mpi);
3981 	return false;
3982 }
3983 
3984 static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev)
3985 {
3986 	char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
3987 	int ret;
3988 
3989 	if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
3990 	    !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
3991 		return 0;
3992 
3993 	ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid);
3994 	if (ret)
3995 		return ret;
3996 
3997 	ret = mlx5_ib_create_data_direct_resources(dev);
3998 	if (ret)
3999 		return ret;
4000 
4001 	INIT_LIST_HEAD(&dev->data_direct_mr_list);
4002 	ret = mlx5_data_direct_ib_reg(dev, vuid);
4003 	if (ret)
4004 		mlx5_ib_free_data_direct_resources(dev);
4005 
4006 	return ret;
4007 }
4008 
4009 static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev)
4010 {
4011 	if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
4012 	    !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
4013 		return;
4014 
4015 	mlx5_data_direct_ib_unreg(dev);
4016 	mlx5_ib_free_data_direct_resources(dev);
4017 }
4018 
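/*
 * A multiport master owns one ib_device spanning all ports that share
 * its system image GUID.  The native port gets a locally allocated stub
 * mpi; the remaining ports are filled from the unaffiliated list as
 * matching slave devices show up (here, or later in mlx5r_mp_probe()).
 */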
4019 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
4020 {
4021 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4022 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
4023 							  port_num + 1);
4024 	struct mlx5_ib_multiport_info *mpi;
4025 	int err;
4026 	u32 i;
4027 
4028 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
4029 		return 0;
4030 
4031 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
4032 						     &dev->sys_image_guid);
4033 	if (err)
4034 		return err;
4035 
4036 	err = mlx5_nic_vport_enable_roce(dev->mdev);
4037 	if (err)
4038 		return err;
4039 
4040 	mutex_lock(&mlx5_ib_multiport_mutex);
4041 	for (i = 0; i < dev->num_ports; i++) {
4042 		bool bound = false;
4043 
4044 		/* build a stub multiport info struct for the native port. */
4045 		if (i == port_num) {
4046 			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
4047 			if (!mpi) {
4048 				mutex_unlock(&mlx5_ib_multiport_mutex);
4049 				mlx5_nic_vport_disable_roce(dev->mdev);
4050 				return -ENOMEM;
4051 			}
4052 
4053 			mpi->is_master = true;
4054 			mpi->mdev = dev->mdev;
4055 			mpi->sys_image_guid = dev->sys_image_guid;
4056 			dev->port[i].mp.mpi = mpi;
4057 			mpi->ibdev = dev;
4058 			mpi = NULL;
4059 			continue;
4060 		}
4061 
4062 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
4063 				    list) {
4064 			if (dev->sys_image_guid == mpi->sys_image_guid &&
4065 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
4066 			    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
4067 				bound = mlx5_ib_bind_slave_port(dev, mpi);
4068 			}
4069 
4070 			if (bound) {
4071 				dev_dbg(mpi->mdev->device,
4072 					"removing port from unaffiliated list.\n");
4073 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
4074 				list_del(&mpi->list);
4075 				break;
4076 			}
4077 		}
4078 		if (!bound)
4079 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
4080 				    i + 1);
4081 	}
4082 
4083 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
4084 	mutex_unlock(&mlx5_ib_multiport_mutex);
4085 	return err;
4086 }
4087 
4088 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
4089 {
4090 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4091 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
4092 							  port_num + 1);
4093 	u32 i;
4094 
4095 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
4096 		return;
4097 
4098 	mutex_lock(&mlx5_ib_multiport_mutex);
4099 	for (i = 0; i < dev->num_ports; i++) {
4100 		if (dev->port[i].mp.mpi) {
4101 			/* Destroy the native port stub */
4102 			if (i == port_num) {
4103 				kfree(dev->port[i].mp.mpi);
4104 				dev->port[i].mp.mpi = NULL;
4105 			} else {
4106 				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
4107 					    i + 1);
4108 				list_add_tail(&dev->port[i].mp.mpi->list,
4109 					      &mlx5_ib_unaffiliated_port_list);
4110 				mlx5_ib_unbind_slave_port(dev,
4111 							  dev->port[i].mp.mpi);
4112 			}
4113 		}
4114 	}
4115 
4116 	mlx5_ib_dbg(dev, "removing from devlist\n");
4117 	list_del(&dev->ib_dev_list);
4118 	mutex_unlock(&mlx5_ib_multiport_mutex);
4119 
4120 	mlx5_nic_vport_disable_roce(dev->mdev);
4121 }
4122 
4123 static int mmap_obj_cleanup(struct ib_uobject *uobject,
4124 			    enum rdma_remove_reason why,
4125 			    struct uverbs_attr_bundle *attrs)
4126 {
4127 	struct mlx5_user_mmap_entry *obj = uobject->object;
4128 
4129 	rdma_user_mmap_entry_remove(&obj->rdma_entry);
4130 	return 0;
4131 }
4132 
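/*
 * Dynamic mmap entries (the VAR/UAR objects below) are confined to the
 * pgoff window [MLX5_IB_MMAP_OFFSET_START << 16,
 * (MLX5_IB_MMAP_OFFSET_END << 16) + 0xffff], keeping them disjoint from
 * the legacy fixed mmap commands encoded in the low offset values.
 */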
4133 static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
4134 					    struct mlx5_user_mmap_entry *entry,
4135 					    size_t length)
4136 {
4137 	return rdma_user_mmap_entry_insert_range(
4138 		&c->ibucontext, &entry->rdma_entry, length,
4139 		(MLX5_IB_MMAP_OFFSET_START << 16),
4140 		((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
4141 }
4142 
4143 static struct mlx5_user_mmap_entry *
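/*
 * VARs are doorbell pages carved out of the VDPA emulation doorbell BAR
 * (see mlx5_ib_init_var_table()); allocation is a first-fit scan of the
 * bitmap, and the page is handed to userspace through an mmap entry.
 */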
4144 alloc_var_entry(struct mlx5_ib_ucontext *c)
4145 {
4146 	struct mlx5_user_mmap_entry *entry;
4147 	struct mlx5_var_table *var_table;
4148 	u32 page_idx;
4149 	int err;
4150 
4151 	var_table = &to_mdev(c->ibucontext.device)->var_table;
4152 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4153 	if (!entry)
4154 		return ERR_PTR(-ENOMEM);
4155 
4156 	mutex_lock(&var_table->bitmap_lock);
4157 	page_idx = find_first_zero_bit(var_table->bitmap,
4158 				       var_table->num_var_hw_entries);
4159 	if (page_idx >= var_table->num_var_hw_entries) {
4160 		err = -ENOSPC;
4161 		mutex_unlock(&var_table->bitmap_lock);
4162 		goto end;
4163 	}
4164 
4165 	set_bit(page_idx, var_table->bitmap);
4166 	mutex_unlock(&var_table->bitmap_lock);
4167 
4168 	entry->address = var_table->hw_start_addr +
4169 				(page_idx * var_table->stride_size);
4170 	entry->page_idx = page_idx;
4171 	entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
4172 
4173 	err = mlx5_rdma_user_mmap_entry_insert(c, entry,
4174 					       var_table->stride_size);
4175 	if (err)
4176 		goto err_insert;
4177 
4178 	return entry;
4179 
4180 err_insert:
4181 	mutex_lock(&var_table->bitmap_lock);
4182 	clear_bit(page_idx, var_table->bitmap);
4183 	mutex_unlock(&var_table->bitmap_lock);
4184 end:
4185 	kfree(entry);
4186 	return ERR_PTR(err);
4187 }
4188 
4189 static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
4190 	struct uverbs_attr_bundle *attrs)
4191 {
4192 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
4193 		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
4194 	struct mlx5_ib_ucontext *c;
4195 	struct mlx5_user_mmap_entry *entry;
4196 	u64 mmap_offset;
4197 	u32 length;
4198 	int err;
4199 
4200 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
4201 	if (IS_ERR(c))
4202 		return PTR_ERR(c);
4203 
4204 	entry = alloc_var_entry(c);
4205 	if (IS_ERR(entry))
4206 		return PTR_ERR(entry);
4207 
4208 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
4209 	length = entry->rdma_entry.npages * PAGE_SIZE;
4210 	uobj->object = entry;
4211 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
4212 
4213 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
4214 			     &mmap_offset, sizeof(mmap_offset));
4215 	if (err)
4216 		return err;
4217 
4218 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
4219 			     &entry->page_idx, sizeof(entry->page_idx));
4220 	if (err)
4221 		return err;
4222 
4223 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
4224 			     &length, sizeof(length));
4225 	return err;
4226 }
4227 
4228 DECLARE_UVERBS_NAMED_METHOD(
4229 	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
4230 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
4231 			MLX5_IB_OBJECT_VAR,
4232 			UVERBS_ACCESS_NEW,
4233 			UA_MANDATORY),
4234 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
4235 			   UVERBS_ATTR_TYPE(u32),
4236 			   UA_MANDATORY),
4237 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
4238 			   UVERBS_ATTR_TYPE(u32),
4239 			   UA_MANDATORY),
4240 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
4241 			    UVERBS_ATTR_TYPE(u64),
4242 			    UA_MANDATORY));
4243 
4244 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
4245 	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
4246 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
4247 			MLX5_IB_OBJECT_VAR,
4248 			UVERBS_ACCESS_DESTROY,
4249 			UA_MANDATORY));
4250 
4251 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
4252 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
4253 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
4254 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
4255 
4256 static bool var_is_supported(struct ib_device *device)
4257 {
4258 	struct mlx5_ib_dev *dev = to_mdev(device);
4259 
4260 	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4261 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
4262 }
4263 
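/*
 * On-demand UAR allocation for user contexts.  The UAR page is mapped
 * either write-combining (BF, for blueflame doorbells) or non-cached,
 * as requested; the handler below rejects BF when the device lacks
 * write-combining support.
 */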
4264 static struct mlx5_user_mmap_entry *
4265 alloc_uar_entry(struct mlx5_ib_ucontext *c,
4266 		enum mlx5_ib_uapi_uar_alloc_type alloc_type)
4267 {
4268 	struct mlx5_user_mmap_entry *entry;
4269 	struct mlx5_ib_dev *dev;
4270 	u32 uar_index;
4271 	int err;
4272 
4273 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
4274 	if (!entry)
4275 		return ERR_PTR(-ENOMEM);
4276 
4277 	dev = to_mdev(c->ibucontext.device);
4278 	err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
4279 	if (err)
4280 		goto end;
4281 
4282 	entry->page_idx = uar_index;
4283 	entry->address = uar_index2paddress(dev, uar_index);
4284 	if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
4285 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
4286 	else
4287 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
4288 
4289 	err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
4290 	if (err)
4291 		goto err_insert;
4292 
4293 	return entry;
4294 
4295 err_insert:
4296 	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
4297 end:
4298 	kfree(entry);
4299 	return ERR_PTR(err);
4300 }
4301 
4302 static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
4303 	struct uverbs_attr_bundle *attrs)
4304 {
4305 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
4306 		attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
4307 	enum mlx5_ib_uapi_uar_alloc_type alloc_type;
4308 	struct mlx5_ib_ucontext *c;
4309 	struct mlx5_user_mmap_entry *entry;
4310 	u64 mmap_offset;
4311 	u32 length;
4312 	int err;
4313 
4314 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
4315 	if (IS_ERR(c))
4316 		return PTR_ERR(c);
4317 
4318 	err = uverbs_get_const(&alloc_type, attrs,
4319 			       MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
4320 	if (err)
4321 		return err;
4322 
4323 	if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
4324 	    alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
4325 		return -EOPNOTSUPP;
4326 
4327 	if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) &&
4328 	    alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
4329 		return -EOPNOTSUPP;
4330 
4331 	entry = alloc_uar_entry(c, alloc_type);
4332 	if (IS_ERR(entry))
4333 		return PTR_ERR(entry);
4334 
4335 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
4336 	length = entry->rdma_entry.npages * PAGE_SIZE;
4337 	uobj->object = entry;
4338 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
4339 
4340 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
4341 			     &mmap_offset, sizeof(mmap_offset));
4342 	if (err)
4343 		return err;
4344 
4345 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
4346 			     &entry->page_idx, sizeof(entry->page_idx));
4347 	if (err)
4348 		return err;
4349 
4350 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
4351 			     &length, sizeof(length));
4352 	return err;
4353 }
4354 
4355 DECLARE_UVERBS_NAMED_METHOD(
4356 	MLX5_IB_METHOD_UAR_OBJ_ALLOC,
4357 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
4358 			MLX5_IB_OBJECT_UAR,
4359 			UVERBS_ACCESS_NEW,
4360 			UA_MANDATORY),
4361 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
4362 			     enum mlx5_ib_uapi_uar_alloc_type,
4363 			     UA_MANDATORY),
4364 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
4365 			   UVERBS_ATTR_TYPE(u32),
4366 			   UA_MANDATORY),
4367 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
4368 			   UVERBS_ATTR_TYPE(u32),
4369 			   UA_MANDATORY),
4370 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
4371 			    UVERBS_ATTR_TYPE(u64),
4372 			    UA_MANDATORY));
4373 
4374 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
4375 	MLX5_IB_METHOD_UAR_OBJ_DESTROY,
4376 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
4377 			MLX5_IB_OBJECT_UAR,
4378 			UVERBS_ACCESS_DESTROY,
4379 			UA_MANDATORY));
4380 
4381 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
4382 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
4383 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
4384 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
4385 
4386 ADD_UVERBS_ATTRIBUTES_SIMPLE(
4387 	mlx5_ib_query_context,
4388 	UVERBS_OBJECT_DEVICE,
4389 	UVERBS_METHOD_QUERY_CONTEXT,
4390 	UVERBS_ATTR_PTR_OUT(
4391 		MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
4392 		UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
4393 				   dump_fill_mkey),
4394 		UA_MANDATORY));
4395 
4396 ADD_UVERBS_ATTRIBUTES_SIMPLE(
4397 	mlx5_ib_reg_dmabuf_mr,
4398 	UVERBS_OBJECT_MR,
4399 	UVERBS_METHOD_REG_DMABUF_MR,
4400 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
4401 			     enum mlx5_ib_uapi_reg_dmabuf_flags,
4402 			     UA_OPTIONAL));
4403 
4404 static const struct uapi_definition mlx5_ib_defs[] = {
4405 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
4406 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
4407 	UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
4408 	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
4409 	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
4410 	UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
4411 
4412 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
4413 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr),
4414 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
4415 				UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
4416 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
4417 	{}
4418 };
4419 
4420 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
4421 {
4422 	mlx5_ib_data_direct_cleanup(dev);
4423 	mlx5_ib_cleanup_multiport_master(dev);
4424 	WARN_ON(!xa_empty(&dev->odp_mkeys));
4425 	mutex_destroy(&dev->cap_mask_mutex);
4426 	WARN_ON(!xa_empty(&dev->sig_mrs));
4427 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
4428 	mlx5r_macsec_dealloc_gids(dev);
4429 }
4430 
4431 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
4432 {
4433 	struct mlx5_core_dev *mdev = dev->mdev;
4434 	int err, i;
4435 
4436 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
4437 	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
4438 	dev->ib_dev.dev.parent = mdev->device;
4439 	dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
4440 
4441 	for (i = 0; i < dev->num_ports; i++) {
4442 		spin_lock_init(&dev->port[i].mp.mpi_lock);
4443 		dev->port[i].roce.dev = dev;
4444 		dev->port[i].roce.native_port_num = i + 1;
4445 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
4446 	}
4447 
4448 	err = mlx5r_cmd_query_special_mkeys(dev);
4449 	if (err)
4450 		return err;
4451 
4452 	err = mlx5r_macsec_init_gids_and_devlist(dev);
4453 	if (err)
4454 		return err;
4455 
4456 	err = mlx5_ib_init_multiport_master(dev);
4457 	if (err)
4458 		goto err;
4459 
4460 	err = set_has_smi_cap(dev);
4461 	if (err)
4462 		goto err_mp;
4463 
4464 	err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
4465 	if (err)
4466 		goto err_mp;
4467 
4468 	if (mlx5_use_mad_ifc(dev))
4469 		get_ext_port_caps(dev);
4470 
4471 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_max(mdev);
4472 
4473 	mutex_init(&dev->cap_mask_mutex);
4474 	mutex_init(&dev->data_direct_lock);
4475 	INIT_LIST_HEAD(&dev->qp_list);
4476 	spin_lock_init(&dev->reset_flow_resource_lock);
4477 	xa_init(&dev->odp_mkeys);
4478 	xa_init(&dev->sig_mrs);
4479 	atomic_set(&dev->mkey_var, 0);
4480 
4481 	spin_lock_init(&dev->dm.lock);
4482 	dev->dm.dev = mdev;
4483 	err = mlx5_ib_data_direct_init(dev);
4484 	if (err)
4485 		goto err_mp;
4486 
4487 	err = pcim_p2pdma_init(mdev->pdev);
4488 	if (err && err != -EOPNOTSUPP)
4489 		goto err_dd;
4490 
4491 	return 0;
4492 err_dd:
4493 	mlx5_ib_data_direct_cleanup(dev);
4494 err_mp:
4495 	mlx5_ib_cleanup_multiport_master(dev);
4496 err:
4497 	mlx5r_macsec_dealloc_gids(dev);
4498 	return err;
4499 }
4500 
4501 static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
4502 					     enum rdma_nl_dev_type type,
4503 					     const char *name);
4504 static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev);
4505 
4506 static const struct ib_device_ops mlx5_ib_dev_ops = {
4507 	.owner = THIS_MODULE,
4508 	.driver_id = RDMA_DRIVER_MLX5,
4509 	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,
4510 
4511 	.add_gid = mlx5_ib_add_gid,
4512 	.add_sub_dev = mlx5_ib_add_sub_dev,
4513 	.alloc_mr = mlx5_ib_alloc_mr,
4514 	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
4515 	.alloc_pd = mlx5_ib_alloc_pd,
4516 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
4517 	.attach_mcast = mlx5_ib_mcg_attach,
4518 	.check_mr_status = mlx5_ib_check_mr_status,
4519 	.create_ah = mlx5_ib_create_ah,
4520 	.create_cq = mlx5_ib_create_cq,
4521 	.create_qp = mlx5_ib_create_qp,
4522 	.create_srq = mlx5_ib_create_srq,
4523 	.create_user_ah = mlx5_ib_create_ah,
4524 	.dealloc_pd = mlx5_ib_dealloc_pd,
4525 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
4526 	.del_gid = mlx5_ib_del_gid,
4527 	.del_sub_dev = mlx5_ib_del_sub_dev,
4528 	.dereg_mr = mlx5_ib_dereg_mr,
4529 	.destroy_ah = mlx5_ib_destroy_ah,
4530 	.destroy_cq = mlx5_ib_destroy_cq,
4531 	.destroy_qp = mlx5_ib_destroy_qp,
4532 	.destroy_srq = mlx5_ib_destroy_srq,
4533 	.detach_mcast = mlx5_ib_mcg_detach,
4534 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
4535 	.drain_rq = mlx5_ib_drain_rq,
4536 	.drain_sq = mlx5_ib_drain_sq,
4537 	.device_group = &mlx5_attr_group,
4538 	.get_dev_fw_str = get_dev_fw_str,
4539 	.get_dma_mr = mlx5_ib_get_dma_mr,
4540 	.get_link_layer = mlx5_ib_port_link_layer,
4541 	.map_mr_sg = mlx5_ib_map_mr_sg,
4542 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
4543 	.mmap = mlx5_ib_mmap,
4544 	.mmap_free = mlx5_ib_mmap_free,
4545 	.mmap_get_pfns = mlx5_ib_mmap_get_pfns,
4546 	.modify_cq = mlx5_ib_modify_cq,
4547 	.modify_device = mlx5_ib_modify_device,
4548 	.modify_port = mlx5_ib_modify_port,
4549 	.modify_qp = mlx5_ib_modify_qp,
4550 	.modify_srq = mlx5_ib_modify_srq,
4551 	.pgoff_to_mmap_entry = mlx5_ib_pgoff_to_mmap_entry,
4552 	.pre_destroy_cq = mlx5_ib_pre_destroy_cq,
4553 	.poll_cq = mlx5_ib_poll_cq,
4554 	.post_destroy_cq = mlx5_ib_post_destroy_cq,
4555 	.post_recv = mlx5_ib_post_recv_nodrain,
4556 	.post_send = mlx5_ib_post_send_nodrain,
4557 	.post_srq_recv = mlx5_ib_post_srq_recv,
4558 	.process_mad = mlx5_ib_process_mad,
4559 	.query_ah = mlx5_ib_query_ah,
4560 	.query_device = mlx5_ib_query_device,
4561 	.query_gid = mlx5_ib_query_gid,
4562 	.query_pkey = mlx5_ib_query_pkey,
4563 	.query_port_speed = mlx5_ib_query_port_speed,
4564 	.query_qp = mlx5_ib_query_qp,
4565 	.query_srq = mlx5_ib_query_srq,
4566 	.query_ucontext = mlx5_ib_query_ucontext,
4567 	.reg_user_mr = mlx5_ib_reg_user_mr,
4568 	.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
4569 	.req_notify_cq = mlx5_ib_arm_cq,
4570 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
4571 	.resize_cq = mlx5_ib_resize_cq,
4572 	.ufile_hw_cleanup = mlx5_ib_ufile_hw_cleanup,
4573 
4574 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
4575 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
4576 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
4577 	INIT_RDMA_OBJ_SIZE(ib_dmah, mlx5_ib_dmah, ibdmah),
4578 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
4579 	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
4580 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
4581 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
4582 };
4583 
4584 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
4585 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
4586 };
4587 
4588 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
4589 	.get_vf_config = mlx5_ib_get_vf_config,
4590 	.get_vf_guid = mlx5_ib_get_vf_guid,
4591 	.get_vf_stats = mlx5_ib_get_vf_stats,
4592 	.set_vf_guid = mlx5_ib_set_vf_guid,
4593 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
4594 };
4595 
4596 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
4597 	.alloc_mw = mlx5_ib_alloc_mw,
4598 	.dealloc_mw = mlx5_ib_dealloc_mw,
4599 
4600 	INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
4601 };
4602 
4603 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
4604 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
4605 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
4606 
4607 	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
4608 };
4609 
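/*
 * The VAR table is sized from the VDPA emulation caps: bar_size =
 * 2^log_doorbell_bar_size * 4K and stride = 2^log_doorbell_stride.
 * Illustrative numbers (not from any particular device): with
 * log_doorbell_bar_size = 4 and log_doorbell_stride = 12 the BAR is
 * 16 * 4096 = 64KiB with a 4KiB stride, i.e. 16 allocatable VARs.
 */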
4610 static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
4611 {
4612 	struct mlx5_core_dev *mdev = dev->mdev;
4613 	struct mlx5_var_table *var_table = &dev->var_table;
4614 	u8 log_doorbell_bar_size;
4615 	u8 log_doorbell_stride;
4616 	u64 bar_size;
4617 
4618 	log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
4619 					log_doorbell_bar_size);
4620 	log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
4621 					log_doorbell_stride);
4622 	var_table->hw_start_addr = dev->mdev->bar_addr +
4623 				MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
4624 					doorbell_bar_offset);
4625 	bar_size = (1ULL << log_doorbell_bar_size) * 4096;
4626 	var_table->stride_size = 1ULL << log_doorbell_stride;
4627 	var_table->num_var_hw_entries = div_u64(bar_size,
4628 						var_table->stride_size);
4629 	mutex_init(&var_table->bitmap_lock);
4630 	var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
4631 					  GFP_KERNEL);
4632 	return (var_table->bitmap) ? 0 : -ENOMEM;
4633 }
4634 
4635 static void mlx5_ib_cleanup_ucaps(struct mlx5_ib_dev *dev)
4636 {
4637 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
4638 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4639 
4640 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
4641 	    MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
4642 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
4643 }
4644 
4645 static int mlx5_ib_init_ucaps(struct mlx5_ib_dev *dev)
4646 {
4647 	int ret;
4648 
4649 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL) {
4650 		ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4651 		if (ret)
4652 			return ret;
4653 	}
4654 
4655 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
4656 	    MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA) {
4657 		ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
4658 		if (ret)
4659 			goto remove_local;
4660 	}
4661 
4662 	return 0;
4663 
4664 remove_local:
4665 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
4666 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4667 	return ret;
4668 }
4669 
4670 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
4671 {
4672 	if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
4673 	    MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL)
4674 		mlx5_ib_cleanup_ucaps(dev);
4675 
4676 	bitmap_free(dev->var_table.bitmap);
4677 }
4678 
4679 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
4680 {
4681 	struct mlx5_core_dev *mdev = dev->mdev;
4682 	int err;
4683 
4684 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
4685 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
4686 		ib_set_device_ops(&dev->ib_dev,
4687 				  &mlx5_ib_dev_ipoib_enhanced_ops);
4688 
4689 	if (mlx5_core_is_pf(mdev))
4690 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
4691 
4692 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
4693 
4694 	if (MLX5_CAP_GEN(mdev, imaicl))
4695 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
4696 
4697 	if (MLX5_CAP_GEN(mdev, xrc))
4698 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
4699 
4700 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
4701 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4702 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
4703 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
4704 
4705 	if (mdev->st)
4706 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dmah_ops);
4707 
4708 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
4709 
4710 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
4711 		dev->ib_dev.driver_def = mlx5_ib_defs;
4712 
4713 	err = init_node_data(dev);
4714 	if (err)
4715 		return err;
4716 
4717 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4718 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
4719 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
4720 		mutex_init(&dev->lb.mutex);
4721 
4722 	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4723 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
4724 		err = mlx5_ib_init_var_table(dev);
4725 		if (err)
4726 			return err;
4727 	}
4728 
4729 	if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
4730 	    MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL) {
4731 		err = mlx5_ib_init_ucaps(dev);
4732 		if (err)
4733 			goto err_ucaps;
4734 	}
4735 
4736 	dev->ib_dev.use_cq_dim = true;
4737 
4738 	return 0;
4739 
4740 err_ucaps:
4741 	bitmap_free(dev->var_table.bitmap);
4742 	return err;
4743 }
4744 
4745 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
4746 	.get_port_immutable = mlx5_port_immutable,
4747 	.query_port = mlx5_ib_query_port,
4748 };
4749 
4750 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
4751 {
4752 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
4753 	return 0;
4754 }
4755 
4756 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
4757 	.get_port_immutable = mlx5_port_rep_immutable,
4758 	.query_port = mlx5_ib_rep_query_port,
4759 	.query_pkey = mlx5_ib_rep_query_pkey,
4760 };
4761 
4762 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
4763 {
4764 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
4765 	return 0;
4766 }
4767 
4768 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
4769 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
4770 	.create_wq = mlx5_ib_create_wq,
4771 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
4772 	.destroy_wq = mlx5_ib_destroy_wq,
4773 	.modify_wq = mlx5_ib_modify_wq,
4774 
4775 	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
4776 			   ib_rwq_ind_tbl),
4777 };
4778 
4779 static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
4780 {
4781 	struct mlx5_core_dev *mdev = dev->mdev;
4782 	enum rdma_link_layer ll;
4783 	int port_type_cap;
4784 	u32 port_num = 0;
4785 	int err;
4786 
4787 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4788 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4789 
4790 	if (ll == IB_LINK_LAYER_ETHERNET) {
4791 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
4792 
4793 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4794 
4795 		/* Register only for native ports */
4796 		mlx5_mdev_netdev_track(dev, port_num);
4797 
4798 		err = mlx5_enable_eth(dev);
4799 		if (err)
4800 			goto cleanup;
4801 	}
4802 
4803 	return 0;
4804 cleanup:
4805 	mlx5_mdev_netdev_untrack(dev, port_num);
4806 	return err;
4807 }
4808 
4809 static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
4810 {
4811 	struct mlx5_core_dev *mdev = dev->mdev;
4812 	enum rdma_link_layer ll;
4813 	int port_type_cap;
4814 	u32 port_num;
4815 
4816 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4817 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4818 
4819 	if (ll == IB_LINK_LAYER_ETHERNET) {
4820 		mlx5_disable_eth(dev);
4821 
4822 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4823 		mlx5_mdev_netdev_untrack(dev, port_num);
4824 	}
4825 }
4826 
4827 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
4828 {
4829 	mlx5_ib_init_cong_debugfs(dev,
4830 				  mlx5_core_native_port_num(dev->mdev) - 1);
4831 	return 0;
4832 }
4833 
4834 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
4835 {
4836 	mlx5_ib_cleanup_cong_debugfs(dev,
4837 				     mlx5_core_native_port_num(dev->mdev) - 1);
4838 }
4839 
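/*
 * Two driver-owned doorbell registers: a regular one and a fast-path one
 * (the trailing bool to mlx5_alloc_bfreg() selects fast path); neither
 * is mapped write-combining here.
 */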
4840 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
4841 {
4842 	int err;
4843 
4844 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4845 	if (err)
4846 		return err;
4847 
4848 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4849 	if (err)
4850 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4851 
4852 	return err;
4853 }
4854 
4855 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4856 {
4857 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4858 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4859 }
4860 
4861 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4862 {
4863 	const char *name;
4864 
4865 	if (dev->sub_dev_name) {
4866 		name = dev->sub_dev_name;
4867 		ib_mark_name_assigned_by_user(&dev->ib_dev);
4868 	} else if (!mlx5_lag_is_active(dev->mdev))
4869 		name = "mlx5_%d";
4870 	else
4871 		name = "mlx5_bond_%d";
4872 	return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4873 }
4874 
4875 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4876 {
4877 	mlx5_mkey_cache_cleanup(dev);
4878 	mlx5r_umr_resource_cleanup(dev);
4879 	mlx5r_umr_cleanup(dev);
4880 }
4881 
4882 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4883 {
4884 	ib_unregister_device(&dev->ib_dev);
4885 }
4886 
4887 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4888 {
4889 	int ret;
4890 
4891 	ret = mlx5r_umr_init(dev);
4892 	if (ret)
4893 		return ret;
4894 
4895 	ret = mlx5_mkey_cache_init(dev);
4896 	if (ret)
4897 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4898 	return ret;
4899 }
4900 
4901 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4902 {
4903 	struct dentry *root;
4904 
4905 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4906 		return 0;
4907 
4908 	mutex_init(&dev->delay_drop.lock);
4909 	dev->delay_drop.dev = dev;
4910 	dev->delay_drop.activate = false;
4911 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4912 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4913 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
4914 	atomic_set(&dev->delay_drop.events_cnt, 0);
4915 
4916 	if (!mlx5_debugfs_root)
4917 		return 0;
4918 
4919 	root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4920 	dev->delay_drop.dir_debugfs = root;
4921 
4922 	debugfs_create_atomic_t("num_timeout_events", 0400, root,
4923 				&dev->delay_drop.events_cnt);
4924 	debugfs_create_atomic_t("num_rqs", 0400, root,
4925 				&dev->delay_drop.rqs_cnt);
4926 	debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
4927 			    &fops_delay_drop_timeout);
4928 	return 0;
4929 }
4930 
4931 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4932 {
4933 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4934 		return;
4935 
4936 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
4937 	if (!dev->delay_drop.dir_debugfs)
4938 		return;
4939 
4940 	debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
4941 	dev->delay_drop.dir_debugfs = NULL;
4942 }
4943 
4944 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
4945 {
4946 	struct mlx5_ib_resources *devr = &dev->devr;
4947 	int port;
4948 
4949 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
4950 		INIT_WORK(&devr->ports[port].pkey_change_work,
4951 			  pkey_change_handler);
4952 
4953 	dev->mdev_events.notifier_call = mlx5_ib_event;
4954 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4955 
4956 	mlx5r_macsec_event_register(dev);
4957 
4958 	return 0;
4959 }
4960 
4961 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
4962 {
4963 	struct mlx5_ib_resources *devr = &dev->devr;
4964 	int port;
4965 
4966 	mlx5r_macsec_event_unregister(dev);
4967 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4968 
4969 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
4970 		cancel_work_sync(&devr->ports[port].pkey_change_work);
4971 }
4972 
4973 void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
4974 			      struct mlx5_data_direct_dev *dev)
4975 {
4976 	mutex_lock(&ibdev->data_direct_lock);
4977 	ibdev->data_direct_dev = dev;
4978 	mutex_unlock(&ibdev->data_direct_lock);
4979 }
4980 
4981 void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
4982 {
4983 	mutex_lock(&ibdev->data_direct_lock);
4984 	mlx5_ib_revoke_data_direct_mrs(ibdev);
4985 	ibdev->data_direct_dev = NULL;
4986 	mutex_unlock(&ibdev->data_direct_lock);
4987 }
4988 
4989 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4990 		      const struct mlx5_ib_profile *profile,
4991 		      int stage)
4992 {
4993 	dev->ib_active = false;
4994 
4995 	/* 'stage' is the number of stages to unwind, newest first */
4996 	while (stage) {
4997 		stage--;
4998 		if (profile->stage[stage].cleanup)
4999 			profile->stage[stage].cleanup(dev);
5000 	}
5001 
5002 	kfree(dev->port);
5003 	ib_dealloc_device(&dev->ib_dev);
5004 }
5005 
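/*
 * Run all profile stages in order.  On failure, the stages that did
 * complete are unwound in reverse, mirroring __mlx5_ib_remove().
 */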
5006 int __mlx5_ib_add(struct mlx5_ib_dev *dev,
5007 		  const struct mlx5_ib_profile *profile)
5008 {
5009 	int err;
5010 	int i;
5011 
5012 	dev->profile = profile;
5013 
5014 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
5015 		if (profile->stage[i].init) {
5016 			err = profile->stage[i].init(dev);
5017 			if (err)
5018 				goto err_out;
5019 		}
5020 	}
5021 
5022 	dev->ib_active = true;
5023 	return 0;
5024 
5025 err_out:
5026 	/* Clean up stages which were initialized */
5027 	while (i) {
5028 		i--;
5029 		if (profile->stage[i].cleanup)
5030 			profile->stage[i].cleanup(dev);
5031 	}
5032 	return err; /* propagate the failing stage's error rather than -ENOMEM */
5033 }
5034 
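/*
 * Three device profiles: the full PF profile, a raw-ethernet profile
 * that skips the ODP stage and swaps in the representor port callbacks,
 * and a minimal plane profile used for SMI plane sub-devices.
 */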
5035 static const struct mlx5_ib_profile pf_profile = {
5036 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5037 		     mlx5_ib_stage_init_init,
5038 		     mlx5_ib_stage_init_cleanup),
5039 	STAGE_CREATE(MLX5_IB_STAGE_FS,
5040 		     mlx5_ib_fs_init,
5041 		     mlx5_ib_fs_cleanup),
5042 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5043 		     mlx5_ib_stage_caps_init,
5044 		     mlx5_ib_stage_caps_cleanup),
5045 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5046 		     mlx5_ib_stage_non_default_cb,
5047 		     NULL),
5048 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5049 		     mlx5_ib_roce_init,
5050 		     mlx5_ib_roce_cleanup),
5051 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5052 		     mlx5_init_qp_table,
5053 		     mlx5_cleanup_qp_table),
5054 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5055 		     mlx5_init_srq_table,
5056 		     mlx5_cleanup_srq_table),
5057 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5058 		     mlx5_ib_dev_res_init,
5059 		     mlx5_ib_dev_res_cleanup),
5060 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
5061 		     mlx5_ib_odp_init_one,
5062 		     mlx5_ib_odp_cleanup_one),
5063 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
5064 		     mlx5_ib_counters_init,
5065 		     mlx5_ib_counters_cleanup),
5066 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
5067 		     mlx5_ib_stage_cong_debugfs_init,
5068 		     mlx5_ib_stage_cong_debugfs_cleanup),
5069 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5070 		     mlx5_ib_stage_bfrag_init,
5071 		     mlx5_ib_stage_bfrag_cleanup),
5072 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5073 		     NULL,
5074 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5075 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
5076 		     mlx5_ib_devx_init,
5077 		     mlx5_ib_devx_cleanup),
5078 	STAGE_CREATE(MLX5_IB_STAGE_SYS_ERROR_NOTIFIER,
5079 		     mlx5_ib_stage_sys_error_notifier_init,
5080 		     mlx5_ib_stage_sys_error_notifier_cleanup),
5081 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5082 		     mlx5_ib_stage_ib_reg_init,
5083 		     mlx5_ib_stage_ib_reg_cleanup),
5084 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
5085 		     mlx5_ib_stage_dev_notifier_init,
5086 		     mlx5_ib_stage_dev_notifier_cleanup),
5087 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5088 		     mlx5_ib_stage_post_ib_reg_umr_init,
5089 		     NULL),
5090 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5091 		     mlx5_ib_stage_delay_drop_init,
5092 		     mlx5_ib_stage_delay_drop_cleanup),
5093 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
5094 		     mlx5_ib_restrack_init,
5095 		     NULL),
5096 };
5097 
5098 const struct mlx5_ib_profile raw_eth_profile = {
5099 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5100 		     mlx5_ib_stage_init_init,
5101 		     mlx5_ib_stage_init_cleanup),
5102 	STAGE_CREATE(MLX5_IB_STAGE_FS,
5103 		     mlx5_ib_fs_init,
5104 		     mlx5_ib_fs_cleanup),
5105 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5106 		     mlx5_ib_stage_caps_init,
5107 		     mlx5_ib_stage_caps_cleanup),
5108 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5109 		     mlx5_ib_stage_raw_eth_non_default_cb,
5110 		     NULL),
5111 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5112 		     mlx5_ib_roce_init,
5113 		     mlx5_ib_roce_cleanup),
5114 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5115 		     mlx5_init_qp_table,
5116 		     mlx5_cleanup_qp_table),
5117 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5118 		     mlx5_init_srq_table,
5119 		     mlx5_cleanup_srq_table),
5120 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5121 		     mlx5_ib_dev_res_init,
5122 		     mlx5_ib_dev_res_cleanup),
5123 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
5124 		     mlx5_ib_counters_init,
5125 		     mlx5_ib_counters_cleanup),
5126 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
5127 		     mlx5_ib_stage_cong_debugfs_init,
5128 		     mlx5_ib_stage_cong_debugfs_cleanup),
5129 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5130 		     mlx5_ib_stage_bfrag_init,
5131 		     mlx5_ib_stage_bfrag_cleanup),
5132 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5133 		     NULL,
5134 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5135 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
5136 		     mlx5_ib_devx_init,
5137 		     mlx5_ib_devx_cleanup),
5138 	STAGE_CREATE(MLX5_IB_STAGE_SYS_ERROR_NOTIFIER,
5139 		     mlx5_ib_stage_sys_error_notifier_init,
5140 		     mlx5_ib_stage_sys_error_notifier_cleanup),
5141 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5142 		     mlx5_ib_stage_ib_reg_init,
5143 		     mlx5_ib_stage_ib_reg_cleanup),
5144 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
5145 		     mlx5_ib_stage_dev_notifier_init,
5146 		     mlx5_ib_stage_dev_notifier_cleanup),
5147 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5148 		     mlx5_ib_stage_post_ib_reg_umr_init,
5149 		     NULL),
5150 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5151 		     mlx5_ib_stage_delay_drop_init,
5152 		     mlx5_ib_stage_delay_drop_cleanup),
5153 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
5154 		     mlx5_ib_restrack_init,
5155 		     NULL),
5156 };
5157 
5158 static const struct mlx5_ib_profile plane_profile = {
5159 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5160 		     mlx5_ib_stage_init_init,
5161 		     mlx5_ib_stage_init_cleanup),
5162 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5163 		     mlx5_ib_stage_caps_init,
5164 		     mlx5_ib_stage_caps_cleanup),
5165 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5166 		     mlx5_ib_stage_non_default_cb,
5167 		     NULL),
5168 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5169 		     mlx5_init_qp_table,
5170 		     mlx5_cleanup_qp_table),
5171 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5172 		     mlx5_init_srq_table,
5173 		     mlx5_cleanup_srq_table),
5174 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5175 		     mlx5_ib_dev_res_init,
5176 		     mlx5_ib_dev_res_cleanup),
5177 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5178 		     mlx5_ib_stage_bfrag_init,
5179 		     mlx5_ib_stage_bfrag_cleanup),
5180 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5181 		     mlx5_ib_stage_ib_reg_init,
5182 		     mlx5_ib_stage_ib_reg_cleanup),
5183 };
5184 
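/*
 * Create an SMI plane sub-device for a multi-plane IB parent.  Only one
 * sub-device per parent is allowed, and only when the parent has the IB
 * link layer, reports planes and supports multi-plane UD QPs.
 */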
5185 static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
5186 					     enum rdma_nl_dev_type type,
5187 					     const char *name)
5188 {
5189 	struct mlx5_ib_dev *mparent = to_mdev(parent), *mplane;
5190 	enum rdma_link_layer ll;
5191 	int ret;
5192 
5193 	if (mparent->smi_dev)
5194 		return ERR_PTR(-EEXIST);
5195 
5196 	ll = mlx5_port_type_cap_to_rdma_ll(MLX5_CAP_GEN(mparent->mdev,
5197 							port_type));
5198 	if (type != RDMA_DEVICE_TYPE_SMI || !mparent->num_plane ||
5199 	    ll != IB_LINK_LAYER_INFINIBAND ||
5200 	    !MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
5201 		return ERR_PTR(-EOPNOTSUPP);
5202 
5203 	mplane = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
5204 					  mlx5_core_net(mparent->mdev));
5205 	if (!mplane)
5206 		return ERR_PTR(-ENOMEM);
5207 
5208 	mplane->port = kcalloc(mparent->num_plane * mparent->num_ports,
5209 			       sizeof(*mplane->port), GFP_KERNEL);
5210 	if (!mplane->port) {
5211 		ret = -ENOMEM;
5212 		goto fail_kcalloc;
5213 	}
5214 
5215 	mplane->ib_dev.type = type;
5216 	mplane->mdev = mparent->mdev;
5217 	mplane->num_ports = mparent->num_plane;
5218 	mplane->sub_dev_name = name;
5219 	mplane->ib_dev.phys_port_cnt = mplane->num_ports;
5220 
5221 	ret = __mlx5_ib_add(mplane, &plane_profile);
5222 	if (ret)
5223 		goto fail_ib_add;
5224 
5225 	mparent->smi_dev = mplane;
5226 	return &mplane->ib_dev;
5227 
5228 fail_ib_add:
5229 	kfree(mplane->port);
5230 fail_kcalloc:
5231 	ib_dealloc_device(&mplane->ib_dev);
5232 	return ERR_PTR(ret);
5233 }
5234 
5235 static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev)
5236 {
5237 	struct mlx5_ib_dev *mdev = to_mdev(sub_dev);
5238 
5239 	to_mdev(sub_dev->parent)->smi_dev = NULL;
5240 	__mlx5_ib_remove(mdev, mdev->profile, MLX5_IB_STAGE_MAX);
5241 }
5242 
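/*
 * ".multiport" auxiliary devices represent non-native ports.  Try to
 * bind the new port to an existing ib_device with the same system image
 * GUID; otherwise park it on the unaffiliated list until a master
 * arrives (mlx5_ib_init_multiport_master()).
 */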
5243 static int mlx5r_mp_probe(struct auxiliary_device *adev,
5244 			  const struct auxiliary_device_id *id)
5245 {
5246 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
5247 	struct mlx5_core_dev *mdev = idev->mdev;
5248 	struct mlx5_ib_multiport_info *mpi;
5249 	struct mlx5_ib_dev *dev;
5250 	bool bound = false;
5251 	int err;
5252 
5253 	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5254 	if (!mpi)
5255 		return -ENOMEM;
5256 
5257 	mpi->mdev = mdev;
5258 	err = mlx5_query_nic_vport_system_image_guid(mdev,
5259 						     &mpi->sys_image_guid);
5260 	if (err) {
5261 		kfree(mpi);
5262 		return err;
5263 	}
5264 
5265 	mutex_lock(&mlx5_ib_multiport_mutex);
5266 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
5267 		if (dev->sys_image_guid == mpi->sys_image_guid &&
5268 		    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
5269 			bound = mlx5_ib_bind_slave_port(dev, mpi);
5270 
5271 		if (bound) {
5272 			rdma_roce_rescan_device(&dev->ib_dev);
5273 			mpi->ibdev->ib_active = true;
5274 			break;
5275 		}
5276 	}
5277 
5278 	if (!bound) {
5279 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5280 		dev_dbg(mdev->device,
5281 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
5282 	}
5283 	mutex_unlock(&mlx5_ib_multiport_mutex);
5284 
5285 	auxiliary_set_drvdata(adev, mpi);
5286 	return 0;
5287 }
5288 
5289 static void mlx5r_mp_remove(struct auxiliary_device *adev)
5290 {
5291 	struct mlx5_ib_multiport_info *mpi;
5292 
5293 	mpi = auxiliary_get_drvdata(adev);
5294 	mutex_lock(&mlx5_ib_multiport_mutex);
5295 	if (mpi->ibdev)
5296 		mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
5297 	else
5298 		list_del(&mpi->list);
5299 	mutex_unlock(&mlx5_ib_multiport_mutex);
5300 	kfree(mpi);
5301 }
5302 
5303 static int mlx5r_probe(struct auxiliary_device *adev,
5304 		       const struct auxiliary_device_id *id)
5305 {
5306 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
5307 	struct mlx5_core_dev *mdev = idev->mdev;
5308 	const struct mlx5_ib_profile *profile;
5309 	int port_type_cap, num_ports, ret;
5310 	enum rdma_link_layer ll;
5311 	struct mlx5_ib_dev *dev;
5312 
5313 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5314 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5315 
5316 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
5317 			MLX5_CAP_GEN(mdev, num_vhca_ports));
5318 	dev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
5319 				       mlx5_core_net(mdev));
5320 	if (!dev)
5321 		return -ENOMEM;
5322 
5323 	if (ll == IB_LINK_LAYER_INFINIBAND) {
5324 		ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane);
5325 		if (ret)
5326 			goto fail;
5327 	}
5328 
5329 	dev->port = kcalloc(num_ports, sizeof(*dev->port), GFP_KERNEL);
5330 	if (!dev->port) {
5331 		ret = -ENOMEM;
5332 		goto fail;
5333 	}
5334 
5335 	dev->mdev = mdev;
5336 	dev->num_ports = num_ports;
5337 	dev->ib_dev.phys_port_cnt = num_ports;
5338 
5339 	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
5340 		profile = &raw_eth_profile;
5341 	else
5342 		profile = &pf_profile;
5343 
5344 	ret = __mlx5_ib_add(dev, profile);
5345 	if (ret)
5346 		goto fail_ib_add;
5347 
5348 	auxiliary_set_drvdata(adev, dev);
5349 	return 0;
5350 
5351 fail_ib_add:
5352 	kfree(dev->port);
5353 fail:
5354 	ib_dealloc_device(&dev->ib_dev);
5355 	return ret;
5356 }
5357 
5358 static void mlx5r_remove(struct auxiliary_device *adev)
5359 {
5360 	struct mlx5_ib_dev *dev;
5361 
5362 	dev = auxiliary_get_drvdata(adev);
5363 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
5364 }
5365 
5366 static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
5367 	{ .name = MLX5_ADEV_NAME ".multiport", },
5368 	{},
5369 };
5370 
5371 static const struct auxiliary_device_id mlx5r_id_table[] = {
5372 	{ .name = MLX5_ADEV_NAME ".rdma", },
5373 	{},
5374 };
5375 
5376 MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
5377 MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
5378 
5379 static struct auxiliary_driver mlx5r_mp_driver = {
5380 	.name = "multiport",
5381 	.probe = mlx5r_mp_probe,
5382 	.remove = mlx5r_mp_remove,
5383 	.id_table = mlx5r_mp_id_table,
5384 };
5385 
5386 static struct auxiliary_driver mlx5r_driver = {
5387 	.name = "rdma",
5388 	.probe = mlx5r_probe,
5389 	.remove = mlx5r_remove,
5390 	.id_table = mlx5r_id_table,
5391 };
5392 
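/*
 * Module init: the emergency XLT page and the ordered event workqueue
 * come first, since the registrations that follow may rely on them; the
 * error path unwinds in exactly the reverse order.
 */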
5393 static int __init mlx5_ib_init(void)
5394 {
5395 	int ret;
5396 
5397 	xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
5398 	if (!xlt_emergency_page)
5399 		return -ENOMEM;
5400 
5401 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
5402 	if (!mlx5_ib_event_wq) {
5403 		free_page((unsigned long)xlt_emergency_page);
5404 		return -ENOMEM;
5405 	}
5406 
5407 	ret = mlx5_ib_qp_event_init();
5408 	if (ret)
5409 		goto qp_event_err;
5410 
5411 	mlx5_ib_odp_init();
5412 	ret = mlx5r_rep_init();
5413 	if (ret)
5414 		goto rep_err;
5415 	ret = mlx5_data_direct_driver_register();
5416 	if (ret)
5417 		goto dd_err;
5418 	ret = auxiliary_driver_register(&mlx5r_mp_driver);
5419 	if (ret)
5420 		goto mp_err;
5421 	ret = auxiliary_driver_register(&mlx5r_driver);
5422 	if (ret)
5423 		goto drv_err;
5424 
5425 	return 0;
5426 
5427 drv_err:
5428 	auxiliary_driver_unregister(&mlx5r_mp_driver);
5429 mp_err:
5430 	mlx5_data_direct_driver_unregister();
5431 dd_err:
5432 	mlx5r_rep_cleanup();
5433 rep_err:
5434 	mlx5_ib_qp_event_cleanup();
5435 qp_event_err:
5436 	destroy_workqueue(mlx5_ib_event_wq);
5437 	free_page((unsigned long)xlt_emergency_page);
5438 	return ret;
5439 }
5440 
5441 static void __exit mlx5_ib_cleanup(void)
5442 {
5443 	mlx5_data_direct_driver_unregister();
5444 	auxiliary_driver_unregister(&mlx5r_driver);
5445 	auxiliary_driver_unregister(&mlx5r_mp_driver);
5446 	mlx5r_rep_cleanup();
5447 
5448 	mlx5_ib_qp_event_cleanup();
5449 	destroy_workqueue(mlx5_ib_event_wq);
5450 	free_page((unsigned long)xlt_emergency_page);
5451 }
5452 
5453 module_init(mlx5_ib_init);
5454 module_exit(mlx5_ib_cleanup);
5455