xref: /linux/drivers/infiniband/hw/mlx5/main.c (revision 4b0b946019e7376752456380b67e54eea2f10a7c)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4  * Copyright (c) 2020, Intel Corporation. All rights reserved.
5  */
6 
7 #include <linux/debugfs.h>
8 #include <linux/highmem.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/pci.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/slab.h>
15 #include <linux/bitmap.h>
16 #include <linux/log2.h>
17 #include <linux/sched.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task.h>
20 #include <linux/delay.h>
21 #include <rdma/ib_user_verbs.h>
22 #include <rdma/ib_addr.h>
23 #include <rdma/ib_cache.h>
24 #include <linux/mlx5/port.h>
25 #include <linux/mlx5/vport.h>
26 #include <linux/mlx5/fs.h>
27 #include <linux/mlx5/eswitch.h>
28 #include <linux/mlx5/driver.h>
29 #include <linux/mlx5/lag.h>
30 #include <linux/list.h>
31 #include <rdma/ib_smi.h>
32 #include <rdma/ib_umem_odp.h>
33 #include <rdma/lag.h>
34 #include <linux/in.h>
35 #include <linux/etherdevice.h>
36 #include "mlx5_ib.h"
37 #include "ib_rep.h"
38 #include "cmd.h"
39 #include "devx.h"
40 #include "dm.h"
41 #include "fs.h"
42 #include "srq.h"
43 #include "qp.h"
44 #include "wr.h"
45 #include "restrack.h"
46 #include "counters.h"
47 #include "umr.h"
48 #include <rdma/uverbs_std_types.h>
49 #include <rdma/uverbs_ioctl.h>
50 #include <rdma/mlx5_user_ioctl_verbs.h>
51 #include <rdma/mlx5_user_ioctl_cmds.h>
52 #include <rdma/ib_ucaps.h>
53 #include "macsec.h"
54 #include "data_direct.h"
55 #include "dmah.h"
56 
57 #define UVERBS_MODULE_NAME mlx5_ib
58 #include <rdma/uverbs_named_ioctl.h>
59 
60 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
61 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
62 MODULE_LICENSE("Dual BSD/GPL");
63 
64 struct mlx5_ib_event_work {
65 	struct work_struct	work;
66 	union {
67 		struct mlx5_ib_dev	      *dev;
68 		struct mlx5_ib_multiport_info *mpi;
69 	};
70 	bool			is_slave;
71 	unsigned int		event;
72 	void			*param;
73 };
74 
75 enum {
76 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
77 };
78 
79 static struct workqueue_struct *mlx5_ib_event_wq;
80 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
81 static LIST_HEAD(mlx5_ib_dev_list);
82 /*
83  * This mutex should be held when accessing either of the above lists
84  */
85 static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
86 
87 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
88 {
89 	struct mlx5_ib_dev *dev;
90 
91 	mutex_lock(&mlx5_ib_multiport_mutex);
92 	dev = mpi->ibdev;
93 	mutex_unlock(&mlx5_ib_multiport_mutex);
94 	return dev;
95 }
96 
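/*
 * Map the FW port_type capability (MLX5_CAP_GEN port_type) onto the
 * RDMA link layer reported to the IB core.
 */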
97 static enum rdma_link_layer
98 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
99 {
100 	switch (port_type_cap) {
101 	case MLX5_CAP_PORT_TYPE_IB:
102 		return IB_LINK_LAYER_INFINIBAND;
103 	case MLX5_CAP_PORT_TYPE_ETH:
104 		return IB_LINK_LAYER_ETHERNET;
105 	default:
106 		return IB_LINK_LAYER_UNSPECIFIED;
107 	}
108 }
109 
110 static enum rdma_link_layer
111 mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
112 {
113 	struct mlx5_ib_dev *dev = to_mdev(device);
114 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
115 
116 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
117 }
118 
119 static int get_port_state(struct ib_device *ibdev,
120 			  u32 port_num,
121 			  enum ib_port_state *state)
122 {
123 	struct ib_port_attr attr;
124 	int ret;
125 
126 	memset(&attr, 0, sizeof(attr));
127 	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
128 	if (!ret)
129 		*state = attr.state;
130 	return ret;
131 }
132 
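/*
 * For switchdev mode, find the roce struct of the port whose
 * representor netdev matches @ndev; an event on the LAG upper device
 * matches the uplink representor instead. On success *port_num is set
 * to the one-based IB port number.
 */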
133 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
134 					   struct net_device *ndev,
135 					   struct net_device *upper,
136 					   u32 *port_num)
137 {
138 	struct net_device *rep_ndev;
139 	struct mlx5_ib_port *port;
140 	int i;
141 
142 	for (i = 0; i < dev->num_ports; i++) {
143 		port = &dev->port[i];
144 		if (!port->rep)
145 			continue;
146 
147 		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
148 			*port_num = i + 1;
149 			return &port->roce;
150 		}
151 
152 		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
153 			continue;
154 		rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);
155 		if (rep_ndev && rep_ndev == ndev) {
156 			dev_put(rep_ndev);
157 			*port_num = i + 1;
158 			return &port->roce;
159 		}
160 
161 		dev_put(rep_ndev);
162 	}
163 
164 	return NULL;
165 }
166 
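/*
 * Decide whether a netdev notification should be turned into an IB
 * port event: only when the IB device is active and the netdev is
 * either our LAG upper device or the netdev bound to this IB port.
 */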
167 static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev,
168 				   struct net_device *ndev,
169 				   struct net_device *upper,
170 				   struct net_device *ib_ndev)
171 {
172 	if (!dev->ib_active)
173 		return false;
174 
175 	/* Event is about our upper device */
176 	if (upper == ndev)
177 		return true;
178 
179 	/* RDMA device is not in lag and not in switchdev */
180 	if (!dev->is_rep && !upper && ndev == ib_ndev)
181 		return true;
182 
183 	/* RDMA device is in switchdev */
184 	if (dev->is_rep && ndev == ib_ndev)
185 		return true;
186 
187 	return false;
188 }
189 
190 static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
191 {
192 	struct mlx5_ib_port *port;
193 	int i;
194 
195 	for (i = 0; i < ibdev->num_ports; i++) {
196 		port = &ibdev->port[i];
197 		if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK)
198 			return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
200 	}
201 
202 	return NULL;
203 }
204 
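/*
 * Netdev notifier handler: keeps the IB port to netdev binding in sync
 * on NETDEV_REGISTER/UNREGISTER, and translates carrier/state changes
 * (NETDEV_CHANGE/UP/DOWN) into IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR
 * dispatches, de-duplicated via roce->last_port_state.
 */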
205 static int mlx5_netdev_event(struct notifier_block *this,
206 			     unsigned long event, void *ptr)
207 {
208 	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
209 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
210 	u32 port_num = roce->native_port_num;
211 	struct net_device *ib_ndev = NULL;
212 	struct mlx5_core_dev *mdev;
213 	struct mlx5_ib_dev *ibdev;
214 
215 	ibdev = roce->dev;
216 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
217 	if (!mdev)
218 		return NOTIFY_DONE;
219 
220 	switch (event) {
221 	case NETDEV_REGISTER:
222 		/* Should already be registered during the load */
223 		if (ibdev->is_rep)
224 			break;
225 
226 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
227 		/* Exit if already registered */
228 		if (ib_ndev)
229 			goto put_ndev;
230 
231 		if (ndev->dev.parent == mdev->device)
232 			ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
233 		break;
234 
235 	case NETDEV_UNREGISTER:
236 		/* In case of reps, ib device goes away before the netdevs */
237 		if (ibdev->is_rep)
238 			break;
239 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
240 		if (ib_ndev == ndev)
241 			ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
242 		goto put_ndev;
243 
244 	case NETDEV_CHANGE:
245 	case NETDEV_UP:
246 	case NETDEV_DOWN: {
247 		struct net_device *upper = NULL;
248 
249 		if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
250 		    !mlx5_core_mp_enabled(mdev))
251 			return NOTIFY_DONE;
252 
253 		if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
254 			struct net_device *lag_ndev;
255 
256 			if (mlx5_lag_is_roce(mdev))
257 				lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);
258 			else /* sriov lag */
259 				lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev);
260 
261 			if (lag_ndev) {
262 				upper = netdev_master_upper_dev_get(lag_ndev);
263 				dev_put(lag_ndev);
264 			} else {
265 				goto done;
266 			}
267 		}
268 
269 		if (ibdev->is_rep)
270 			roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
271 		if (!roce)
272 			return NOTIFY_DONE;
273 
274 		ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
275 
276 		if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) {
277 			struct ib_event ibev = { };
278 			enum ib_port_state port_state;
279 
280 			if (get_port_state(&ibdev->ib_dev, port_num,
281 					   &port_state))
282 				goto put_ndev;
283 
284 			if (roce->last_port_state == port_state)
285 				goto put_ndev;
286 
287 			roce->last_port_state = port_state;
288 			ibev.device = &ibdev->ib_dev;
289 			if (port_state == IB_PORT_DOWN)
290 				ibev.event = IB_EVENT_PORT_ERR;
291 			else if (port_state == IB_PORT_ACTIVE)
292 				ibev.event = IB_EVENT_PORT_ACTIVE;
293 			else
294 				goto put_ndev;
295 
296 			ibev.element.port_num = port_num;
297 			ib_dispatch_event(&ibev);
298 		}
299 		break;
300 	}
301 
302 	default:
303 		break;
304 	}
305 put_ndev:
306 	dev_put(ib_ndev);
307 done:
308 	mlx5_ib_put_native_port_mdev(ibdev, port_num);
309 	return NOTIFY_DONE;
310 }
311 
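/*
 * Return the mlx5 core device that owns the native port behind
 * @ib_port_num. For an affiliated multiport slave a reference is taken
 * and must be dropped with mlx5_ib_put_native_port_mdev(); the master
 * needs no reference since it lives as long as the ib_dev. Returns
 * NULL when a multiport slave is not affiliated yet.
 */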
312 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
313 						   u32 ib_port_num,
314 						   u32 *native_port_num)
315 {
316 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
317 							  ib_port_num);
318 	struct mlx5_core_dev *mdev = NULL;
319 	struct mlx5_ib_multiport_info *mpi;
320 	struct mlx5_ib_port *port;
321 
322 	if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
323 		if (native_port_num)
324 			*native_port_num = smi_to_native_portnum(ibdev,
325 								 ib_port_num);
326 		return ibdev->mdev;
328 	}
329 
330 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
331 	    ll != IB_LINK_LAYER_ETHERNET) {
332 		if (native_port_num)
333 			*native_port_num = ib_port_num;
334 		return ibdev->mdev;
335 	}
336 
337 	if (native_port_num)
338 		*native_port_num = 1;
339 
340 	port = &ibdev->port[ib_port_num - 1];
341 	spin_lock(&port->mp.mpi_lock);
342 	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
343 	if (mpi && !mpi->unaffiliate) {
344 		mdev = mpi->mdev;
345 		/* If it's the master no need to refcount, it'll exist
346 		 * as long as the ib_dev exists.
347 		 */
348 		if (!mpi->is_master)
349 			mpi->mdev_refcnt++;
350 	}
351 	spin_unlock(&port->mp.mpi_lock);
352 
353 	return mdev;
354 }
355 
356 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
357 {
358 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
359 							  port_num);
360 	struct mlx5_ib_multiport_info *mpi;
361 	struct mlx5_ib_port *port;
362 
363 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
364 		return;
365 
366 	port = &ibdev->port[port_num - 1];
367 
368 	spin_lock(&port->mp.mpi_lock);
369 	mpi = ibdev->port[port_num - 1].mp.mpi;
370 	if (mpi->is_master)
371 		goto out;
372 
373 	mpi->mdev_refcnt--;
374 	if (mpi->unaffiliate)
375 		complete(&mpi->unref_comp);
376 out:
377 	spin_unlock(&port->mp.mpi_lock);
378 }
379 
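/*
 * Translate a legacy PTYS eth_proto_oper bit into the IB (width, speed)
 * pair whose product matches the Ethernet rate, e.g. 25GBASE-CR maps
 * to 1X EDR.
 */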
380 static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
381 					   u16 *active_speed, u8 *active_width)
382 {
383 	switch (eth_proto_oper) {
384 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
385 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
386 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
387 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
388 		*active_width = IB_WIDTH_1X;
389 		*active_speed = IB_SPEED_SDR;
390 		break;
391 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
392 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
393 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
394 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
395 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
396 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
397 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
398 		*active_width = IB_WIDTH_1X;
399 		*active_speed = IB_SPEED_QDR;
400 		break;
401 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
402 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
403 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
404 		*active_width = IB_WIDTH_1X;
405 		*active_speed = IB_SPEED_EDR;
406 		break;
407 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
408 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
409 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
410 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
411 		*active_width = IB_WIDTH_4X;
412 		*active_speed = IB_SPEED_QDR;
413 		break;
414 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
415 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
416 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
417 		*active_width = IB_WIDTH_1X;
418 		*active_speed = IB_SPEED_HDR;
419 		break;
420 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
421 		*active_width = IB_WIDTH_4X;
422 		*active_speed = IB_SPEED_FDR;
423 		break;
424 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
425 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
426 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
427 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
428 		*active_width = IB_WIDTH_4X;
429 		*active_speed = IB_SPEED_EDR;
430 		break;
431 	default:
432 		return -EINVAL;
433 	}
434 
435 	return 0;
436 }
437 
438 static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
439 					u8 *active_width)
440 {
441 	switch (eth_proto_oper) {
442 	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
443 	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
444 		*active_width = IB_WIDTH_1X;
445 		*active_speed = IB_SPEED_SDR;
446 		break;
447 	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
448 		*active_width = IB_WIDTH_1X;
449 		*active_speed = IB_SPEED_DDR;
450 		break;
451 	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
452 		*active_width = IB_WIDTH_1X;
453 		*active_speed = IB_SPEED_QDR;
454 		break;
455 	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
456 		*active_width = IB_WIDTH_4X;
457 		*active_speed = IB_SPEED_QDR;
458 		break;
459 	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
460 		*active_width = IB_WIDTH_1X;
461 		*active_speed = IB_SPEED_EDR;
462 		break;
463 	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
464 		*active_width = IB_WIDTH_2X;
465 		*active_speed = IB_SPEED_EDR;
466 		break;
467 	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
468 		*active_width = IB_WIDTH_1X;
469 		*active_speed = IB_SPEED_HDR;
470 		break;
471 	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
472 		*active_width = IB_WIDTH_4X;
473 		*active_speed = IB_SPEED_EDR;
474 		break;
475 	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
476 		*active_width = IB_WIDTH_2X;
477 		*active_speed = IB_SPEED_HDR;
478 		break;
479 	case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
480 		*active_width = IB_WIDTH_1X;
481 		*active_speed = IB_SPEED_NDR;
482 		break;
483 	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
484 		*active_width = IB_WIDTH_4X;
485 		*active_speed = IB_SPEED_HDR;
486 		break;
487 	case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
488 		*active_width = IB_WIDTH_2X;
489 		*active_speed = IB_SPEED_NDR;
490 		break;
491 	case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
492 		*active_width = IB_WIDTH_1X;
493 		*active_speed = IB_SPEED_XDR;
494 		break;
495 	case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
496 		*active_width = IB_WIDTH_8X;
497 		*active_speed = IB_SPEED_HDR;
498 		break;
499 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
500 		*active_width = IB_WIDTH_4X;
501 		*active_speed = IB_SPEED_NDR;
502 		break;
503 	case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
504 		*active_width = IB_WIDTH_2X;
505 		*active_speed = IB_SPEED_XDR;
506 		break;
507 	case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
508 		*active_width = IB_WIDTH_8X;
509 		*active_speed = IB_SPEED_NDR;
510 		break;
511 	case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
512 		*active_width = IB_WIDTH_4X;
513 		*active_speed = IB_SPEED_XDR;
514 		break;
515 	case MLX5E_PROT_MASK(MLX5E_1600GAUI_8_1600GBASE_CR8_KR8):
516 		*active_width = IB_WIDTH_8X;
517 		*active_speed = IB_SPEED_XDR;
518 		break;
519 	default:
520 		return -EINVAL;
521 	}
522 
523 	return 0;
524 }
525 
526 static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
527 				    u8 *active_width, bool ext)
528 {
529 	return ext ?
530 		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
531 					     active_width) :
532 		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
533 						active_width);
534 }
535 
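/*
 * Fill ib_port_attr for an Ethernet (RoCE) port: speed and width come
 * from PTYS, while port state and active MTU are taken from the bound
 * netdev (or its LAG master when bonded). Queries on a not yet
 * affiliated multiport slave are answered with the master's values.
 */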
536 static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
537 				struct ib_port_attr *props)
538 {
539 	struct mlx5_ib_dev *dev = to_mdev(device);
540 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
541 	struct mlx5_core_dev *mdev;
542 	struct net_device *ndev, *upper;
543 	enum ib_mtu ndev_ib_mtu;
544 	bool put_mdev = true;
545 	u32 eth_prot_oper;
546 	u32 mdev_port_num;
547 	bool ext;
548 	int err;
549 
550 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
551 	if (!mdev) {
552 		/* This means the port isn't affiliated yet. Get the
553 		 * info for the master port instead.
554 		 */
555 		put_mdev = false;
556 		mdev = dev->mdev;
557 		mdev_port_num = 1;
558 		port_num = 1;
559 	}
560 
561 	/* Possible bad flows are checked before filling out props so in case
562 	 * of an error it will still be zeroed out.
563 	 * Use native port in case of reps
564 	 */
565 	if (dev->is_rep) {
566 		struct mlx5_eswitch_rep *rep;
567 
568 		rep = dev->port[port_num - 1].rep;
569 		if (rep) {
570 			mdev = mlx5_eswitch_get_core_dev(rep->esw);
571 			WARN_ON(!mdev);
572 		}
573 		mdev_port_num = 1;
574 	}
575 
576 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
577 				   mdev_port_num, 0);
578 
579 	if (err)
580 		goto out;
581 	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
582 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
583 
584 	props->active_width     = IB_WIDTH_4X;
585 	props->active_speed     = IB_SPEED_QDR;
586 
587 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
588 				 &props->active_width, ext);
589 
590 	if (!dev->is_rep && dev->mdev->roce.roce_en) {
591 		u16 qkey_viol_cntr;
592 
593 		props->port_cap_flags |= IB_PORT_CM_SUP;
594 		props->ip_gids = true;
595 		props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
596 						   roce_address_table_size);
597 		mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
598 		props->qkey_viol_cntr = qkey_viol_cntr;
599 	}
600 	props->max_mtu          = IB_MTU_4096;
601 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
602 	props->pkey_tbl_len     = 1;
603 	props->state            = IB_PORT_DOWN;
604 	props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;
605 
606 	/* If this is a stub query for an unaffiliated port stop here */
607 	if (!put_mdev)
608 		goto out;
609 
610 	ndev = ib_device_get_netdev(device, port_num);
611 	if (!ndev)
612 		goto out;
613 
614 	if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
615 		rcu_read_lock();
616 		upper = netdev_master_upper_dev_get_rcu(ndev);
617 		if (upper) {
618 			dev_put(ndev);
619 			ndev = upper;
620 			dev_hold(ndev);
621 		}
622 		rcu_read_unlock();
623 	}
624 
625 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
626 		props->state      = IB_PORT_ACTIVE;
627 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
628 	}
629 
630 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
631 
632 	dev_put(ndev);
633 
634 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
635 out:
636 	if (put_mdev)
637 		mlx5_ib_put_native_port_mdev(dev, port_num);
638 	return err;
639 }
640 
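/*
 * Program a GID table entry into HW, deriving the RoCE version and L3
 * type from the GID attribute; called with a NULL @gid to clear the
 * entry when a GID is deleted.
 */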
641 int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
642 		  unsigned int index, const union ib_gid *gid,
643 		  const struct ib_gid_attr *attr)
644 {
645 	enum ib_gid_type gid_type;
646 	u16 vlan_id = 0xffff;
647 	u8 roce_version = 0;
648 	u8 roce_l3_type = 0;
649 	u8 mac[ETH_ALEN];
650 	int ret;
651 
652 	gid_type = attr->gid_type;
653 	if (gid) {
654 		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
655 		if (ret)
656 			return ret;
657 	}
658 
659 	switch (gid_type) {
660 	case IB_GID_TYPE_ROCE:
661 		roce_version = MLX5_ROCE_VERSION_1;
662 		break;
663 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
664 		roce_version = MLX5_ROCE_VERSION_2;
665 		if (gid && ipv6_addr_v4mapped((void *)gid))
666 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
667 		else
668 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
669 		break;
670 
671 	default:
672 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
673 	}
674 
675 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
676 				      roce_l3_type, gid->raw, mac,
677 				      vlan_id < VLAN_CFI_MASK, vlan_id,
678 				      port_num);
679 }
680 
681 static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
682 			   __always_unused void **context)
683 {
684 	int ret;
685 
686 	ret = mlx5r_add_gid_macsec_operations(attr);
687 	if (ret)
688 		return ret;
689 
690 	return set_roce_addr(to_mdev(attr->device), attr->port_num,
691 			     attr->index, &attr->gid, attr);
692 }
693 
694 static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
695 			   __always_unused void **context)
696 {
697 	int ret;
698 
699 	ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
700 			    attr->index, NULL, attr);
701 	if (ret)
702 		return ret;
703 
704 	mlx5r_del_gid_macsec_operations(attr);
705 	return 0;
706 }
707 
708 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
709 				   const struct ib_gid_attr *attr)
710 {
711 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
712 		return 0;
713 
714 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
715 }
716 
717 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
718 {
719 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
720 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
721 	return 0;
722 }
723 
724 enum {
725 	MLX5_VPORT_ACCESS_METHOD_MAD,
726 	MLX5_VPORT_ACCESS_METHOD_HCA,
727 	MLX5_VPORT_ACCESS_METHOD_NIC,
728 };
729 
730 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
731 {
732 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
733 		return MLX5_VPORT_ACCESS_METHOD_MAD;
734 
735 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
736 	    IB_LINK_LAYER_ETHERNET)
737 		return MLX5_VPORT_ACCESS_METHOD_NIC;
738 
739 	return MLX5_VPORT_ACCESS_METHOD_HCA;
740 }
741 
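/*
 * Advertise IB_ATOMIC_HCA only if FW supports both 8-byte
 * compare-and-swap and fetch-and-add at the QP and can respond in
 * host endianness; otherwise report IB_ATOMIC_NONE.
 */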
742 static void get_atomic_caps(struct mlx5_ib_dev *dev,
743 			    u8 atomic_size_qp,
744 			    struct ib_device_attr *props)
745 {
746 	u8 tmp;
747 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
748 	u8 atomic_req_8B_endianness_mode =
749 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
750 
751 	/* Check if HW supports 8 byte standard atomic operations and is
752 	 * capable of responding in host endianness
753 	 */
754 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
755 	if (((atomic_operations & tmp) == tmp) &&
756 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
757 	    (atomic_req_8B_endianness_mode)) {
758 		props->atomic_cap = IB_ATOMIC_HCA;
759 	} else {
760 		props->atomic_cap = IB_ATOMIC_NONE;
761 	}
762 }
763 
764 static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
765 			       struct ib_device_attr *props)
766 {
767 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
768 
769 	get_atomic_caps(dev, atomic_size_qp, props);
770 }
771 
772 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
773 					__be64 *sys_image_guid)
774 {
775 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
776 	struct mlx5_core_dev *mdev = dev->mdev;
777 	u64 tmp;
778 	int err;
779 
780 	switch (mlx5_get_vport_access_method(ibdev)) {
781 	case MLX5_VPORT_ACCESS_METHOD_MAD:
782 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
783 							    sys_image_guid);
784 
785 	case MLX5_VPORT_ACCESS_METHOD_HCA:
786 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
787 		break;
788 
789 	case MLX5_VPORT_ACCESS_METHOD_NIC:
790 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
791 		break;
792 
793 	default:
794 		return -EINVAL;
795 	}
796 
797 	if (!err)
798 		*sys_image_guid = cpu_to_be64(tmp);
799 
800 	return err;
802 }
803 
804 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
805 				u16 *max_pkeys)
806 {
807 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
808 	struct mlx5_core_dev *mdev = dev->mdev;
809 
810 	switch (mlx5_get_vport_access_method(ibdev)) {
811 	case MLX5_VPORT_ACCESS_METHOD_MAD:
812 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
813 
814 	case MLX5_VPORT_ACCESS_METHOD_HCA:
815 	case MLX5_VPORT_ACCESS_METHOD_NIC:
816 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
817 						pkey_table_size));
818 		return 0;
819 
820 	default:
821 		return -EINVAL;
822 	}
823 }
824 
825 static int mlx5_query_vendor_id(struct ib_device *ibdev,
826 				u32 *vendor_id)
827 {
828 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
829 
830 	switch (mlx5_get_vport_access_method(ibdev)) {
831 	case MLX5_VPORT_ACCESS_METHOD_MAD:
832 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
833 
834 	case MLX5_VPORT_ACCESS_METHOD_HCA:
835 	case MLX5_VPORT_ACCESS_METHOD_NIC:
836 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
837 
838 	default:
839 		return -EINVAL;
840 	}
841 }
842 
843 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
844 				__be64 *node_guid)
845 {
846 	u64 tmp;
847 	int err;
848 
849 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
850 	case MLX5_VPORT_ACCESS_METHOD_MAD:
851 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
852 
853 	case MLX5_VPORT_ACCESS_METHOD_HCA:
854 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
855 		break;
856 
857 	case MLX5_VPORT_ACCESS_METHOD_NIC:
858 		err = mlx5_query_nic_vport_node_guid(dev->mdev, 0, false, &tmp);
859 		break;
860 
861 	default:
862 		return -EINVAL;
863 	}
864 
865 	if (!err)
866 		*node_guid = cpu_to_be64(tmp);
867 
868 	return err;
869 }
870 
871 struct mlx5_reg_node_desc {
872 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
873 };
874 
875 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
876 {
877 	struct mlx5_reg_node_desc in;
878 
879 	if (mlx5_use_mad_ifc(dev))
880 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
881 
882 	memset(&in, 0, sizeof(in));
883 
884 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
885 				    sizeof(struct mlx5_reg_node_desc),
886 				    MLX5_REG_NODE_DESC, 0, 0);
887 }
888 
889 static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
890 				struct mlx5_ib_query_device_resp *resp)
891 {
892 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
893 	u16 vport = mlx5_eswitch_manager_vport(mdev);
894 
895 	resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
896 								      vport);
897 	resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
898 }
899 
900 /*
901  * Calculate maximum SQ overhead across all QP types.
902  * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
903  * have smaller overhead than the types calculated below,
904  * so they are implicitly included.
905  */
906 static u32 mlx5_ib_calc_max_sq_overhead(void)
907 {
908 	u32 max_overhead_xrc, overhead_ud_lso, a, b;
909 
910 	/* XRC_INI */
911 	max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
912 	max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
913 	a = sizeof(struct mlx5_wqe_atomic_seg) +
914 	    sizeof(struct mlx5_wqe_raddr_seg);
915 	b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
916 	    sizeof(struct mlx5_mkey_seg) +
917 	    MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
918 	max_overhead_xrc += max(a, b);
919 
920 	/* UD with LSO */
921 	overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
922 	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
923 	overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
924 	overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
925 
926 	return max(max_overhead_xrc, overhead_ud_lso);
927 }
928 
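/*
 * Upper bound on WRs per SQ: the WQ buffer holds 2^log_max_qp_sz basic
 * blocks of MLX5_SEND_WQE_BB bytes, while the worst-case WQE is the
 * maximum overhead plus a single data segment, rounded up to a power
 * of two and aligned to the basic block size.
 */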
929 static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
930 {
931 	struct mlx5_core_dev *mdev = dev->mdev;
932 	u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
933 	u32 max_wqe_size;
934 	/* max QP overhead + 1 SGE, no inline, no special features */
935 	max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
936 		       sizeof(struct mlx5_wqe_data_seg);
937 
938 	max_wqe_size = roundup_pow_of_two(max_wqe_size);
939 
940 	max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
941 
942 	return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
943 }
944 
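/*
 * Query device attributes: kernel-visible caps in @props are derived
 * from FW capability bits; on the extended uverbs path, vendor caps
 * (TSO, RSS, CQE compression, packet pacing, striding RQ, ...) are
 * appended to the response for every trailing field that fits in the
 * user's output buffer, tracked via resp.response_length.
 */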
945 static int mlx5_ib_query_device(struct ib_device *ibdev,
946 				struct ib_device_attr *props,
947 				struct ib_udata *uhw)
948 {
949 	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
950 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
951 	struct mlx5_core_dev *mdev = dev->mdev;
952 	int err = -ENOMEM;
953 	int max_sq_desc;
954 	int max_rq_sg;
955 	int max_sq_sg;
956 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
957 	bool raw_support = !mlx5_core_mp_enabled(mdev);
958 	struct mlx5_ib_query_device_resp resp = {};
959 	size_t resp_len;
960 	u64 max_tso;
961 
962 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
963 	if (uhw_outlen && uhw_outlen < resp_len)
964 		return -EINVAL;
965 
966 	resp.response_length = resp_len;
967 
968 	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
969 		return -EINVAL;
970 
971 	memset(props, 0, sizeof(*props));
972 	err = mlx5_query_system_image_guid(ibdev,
973 					   &props->sys_image_guid);
974 	if (err)
975 		return err;
976 
977 	props->max_pkeys = dev->pkey_table_len;
978 
979 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
980 	if (err)
981 		return err;
982 
983 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
984 		(fw_rev_min(dev->mdev) << 16) |
985 		fw_rev_sub(dev->mdev);
986 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
987 		IB_DEVICE_PORT_ACTIVE_EVENT		|
988 		IB_DEVICE_SYS_IMAGE_GUID		|
989 		IB_DEVICE_RC_RNR_NAK_GEN;
990 
991 	if (MLX5_CAP_GEN(mdev, pkv))
992 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
993 	if (MLX5_CAP_GEN(mdev, qkv))
994 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
995 	if (MLX5_CAP_GEN(mdev, apm))
996 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
997 	if (MLX5_CAP_GEN(mdev, xrc))
998 		props->device_cap_flags |= IB_DEVICE_XRC;
999 	if (MLX5_CAP_GEN(mdev, imaicl)) {
1000 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
1001 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
1002 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1003 		/* We support 'Gappy' memory registration too */
1004 		props->kernel_cap_flags |= IBK_SG_GAPS_REG;
1005 	}
1006 	/* IB_WR_REG_MR always requires changing the entity size with UMR */
1007 	if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1008 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
1009 	if (MLX5_CAP_GEN(mdev, sho)) {
1010 		props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
1011 		/* At this stage no support for signature handover */
1012 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
1013 				      IB_PROT_T10DIF_TYPE_2 |
1014 				      IB_PROT_T10DIF_TYPE_3;
1015 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
1016 				       IB_GUARD_T10DIF_CSUM;
1017 	}
1018 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
1019 		props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
1020 
1021 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
1022 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
1023 			/* Legacy bit to support old userspace libraries */
1024 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
1025 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
1026 		}
1027 
1028 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
1029 			props->raw_packet_caps |=
1030 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
1031 
1032 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
1033 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
1034 			if (max_tso) {
1035 				resp.tso_caps.max_tso = 1 << max_tso;
1036 				resp.tso_caps.supported_qpts |=
1037 					1 << IB_QPT_RAW_PACKET;
1038 				resp.response_length += sizeof(resp.tso_caps);
1039 			}
1040 		}
1041 
1042 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
1043 			resp.rss_caps.rx_hash_function =
1044 						MLX5_RX_HASH_FUNC_TOEPLITZ;
1045 			resp.rss_caps.rx_hash_fields_mask =
1046 						MLX5_RX_HASH_SRC_IPV4 |
1047 						MLX5_RX_HASH_DST_IPV4 |
1048 						MLX5_RX_HASH_SRC_IPV6 |
1049 						MLX5_RX_HASH_DST_IPV6 |
1050 						MLX5_RX_HASH_SRC_PORT_TCP |
1051 						MLX5_RX_HASH_DST_PORT_TCP |
1052 						MLX5_RX_HASH_SRC_PORT_UDP |
1053 						MLX5_RX_HASH_DST_PORT_UDP |
1054 						MLX5_RX_HASH_INNER;
1055 			resp.response_length += sizeof(resp.rss_caps);
1056 		}
1057 	} else {
1058 		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
1059 			resp.response_length += sizeof(resp.tso_caps);
1060 		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
1061 			resp.response_length += sizeof(resp.rss_caps);
1062 	}
1063 
1064 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
1065 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
1066 		props->kernel_cap_flags |= IBK_UD_TSO;
1067 	}
1068 
1069 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
1070 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
1071 	    raw_support)
1072 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
1073 
1074 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
1075 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
1076 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
1077 
1078 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1079 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
1080 	    raw_support) {
1081 		/* Legacy bit to support old userspace libraries */
1082 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
1083 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
1084 	}
1085 
1086 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
1087 		props->max_dm_size =
1088 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
1089 	}
1090 
1091 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
1092 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
1093 
1094 	if (MLX5_CAP_GEN(mdev, end_pad))
1095 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
1096 
1097 	props->vendor_part_id	   = mdev->pdev->device;
1098 	props->hw_ver		   = mdev->pdev->revision;
1099 
1100 	props->max_mr_size	   = ~0ull;
1101 	props->page_size_cap	   = ~(min_page_size - 1);
1102 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
1103 	props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
1104 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
1105 		     sizeof(struct mlx5_wqe_data_seg);
1106 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
1107 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
1108 		     sizeof(struct mlx5_wqe_raddr_seg)) /
1109 		sizeof(struct mlx5_wqe_data_seg);
1110 	props->max_send_sge = max_sq_sg;
1111 	props->max_recv_sge = max_rq_sg;
1112 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
1113 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
1114 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1115 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1116 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1117 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1118 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1119 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1120 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1121 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1122 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
1123 	props->max_srq_sge	   = max_rq_sg - 1;
1124 	props->max_fast_reg_page_list_len =
1125 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1126 	props->max_pi_fast_reg_page_list_len =
1127 		props->max_fast_reg_page_list_len / 2;
1128 	props->max_sgl_rd =
1129 		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
1130 	get_atomic_caps_qp(dev, props);
1131 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
1132 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1133 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1134 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1135 					   props->max_mcast_grp;
1136 	props->max_ah = INT_MAX;
1137 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1138 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1139 
1140 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1141 		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1142 			props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
1143 		props->odp_caps = dev->odp_caps;
1144 		if (!uhw) {
1145 			/* ODP for kernel QPs is not implemented for receive
1146 			 * WQEs and SRQ WQEs
1147 			 */
1148 			props->odp_caps.per_transport_caps.rc_odp_caps &=
1149 				~(IB_ODP_SUPPORT_READ |
1150 				  IB_ODP_SUPPORT_SRQ_RECV);
1151 			props->odp_caps.per_transport_caps.uc_odp_caps &=
1152 				~(IB_ODP_SUPPORT_READ |
1153 				  IB_ODP_SUPPORT_SRQ_RECV);
1154 			props->odp_caps.per_transport_caps.ud_odp_caps &=
1155 				~(IB_ODP_SUPPORT_READ |
1156 				  IB_ODP_SUPPORT_SRQ_RECV);
1157 			props->odp_caps.per_transport_caps.xrc_odp_caps &=
1158 				~(IB_ODP_SUPPORT_READ |
1159 				  IB_ODP_SUPPORT_SRQ_RECV);
1160 		}
1161 	}
1162 
1163 	if (mlx5_core_is_vf(mdev))
1164 		props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
1165 
1166 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
1167 	    IB_LINK_LAYER_ETHERNET && raw_support) {
1168 		props->rss_caps.max_rwq_indirection_tables =
1169 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1170 		props->rss_caps.max_rwq_indirection_table_size =
1171 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1172 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1173 		props->max_wq_type_rq =
1174 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1175 	}
1176 
1177 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
1178 		props->tm_caps.max_num_tags =
1179 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1180 		props->tm_caps.max_ops =
1181 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1182 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1183 	}
1184 
1185 	if (MLX5_CAP_GEN(mdev, tag_matching) &&
1186 	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1187 		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1188 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1189 	}
1190 
1191 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1192 		props->cq_caps.max_cq_moderation_count =
1193 						MLX5_MAX_CQ_COUNT;
1194 		props->cq_caps.max_cq_moderation_period =
1195 						MLX5_MAX_CQ_PERIOD;
1196 	}
1197 
1198 	if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1199 		resp.response_length += sizeof(resp.cqe_comp_caps);
1200 
1201 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1202 			resp.cqe_comp_caps.max_num =
1203 				MLX5_CAP_GEN(dev->mdev,
1204 					     cqe_compression_max_num);
1205 
1206 			resp.cqe_comp_caps.supported_format =
1207 				MLX5_IB_CQE_RES_FORMAT_HASH |
1208 				MLX5_IB_CQE_RES_FORMAT_CSUM;
1209 
1210 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1211 				resp.cqe_comp_caps.supported_format |=
1212 					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1213 		}
1214 	}
1215 
1216 	if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1217 	    raw_support) {
1218 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1219 		    MLX5_CAP_GEN(mdev, qos)) {
1220 			resp.packet_pacing_caps.qp_rate_limit_max =
1221 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1222 			resp.packet_pacing_caps.qp_rate_limit_min =
1223 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1224 			resp.packet_pacing_caps.supported_qpts |=
1225 				1 << IB_QPT_RAW_PACKET;
1226 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1227 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1228 				resp.packet_pacing_caps.cap_flags |=
1229 					MLX5_IB_PP_SUPPORT_BURST;
1230 		}
1231 		resp.response_length += sizeof(resp.packet_pacing_caps);
1232 	}
1233 
1234 	if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1235 	    uhw_outlen) {
1236 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1237 			resp.mlx5_ib_support_multi_pkt_send_wqes =
1238 				MLX5_IB_ALLOW_MPW;
1239 
1240 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1241 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
1242 				MLX5_IB_SUPPORT_EMPW;
1243 
1244 		resp.response_length +=
1245 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1246 	}
1247 
1248 	if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1249 		resp.response_length += sizeof(resp.flags);
1250 
1251 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1252 			resp.flags |=
1253 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1254 
1255 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
1256 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1257 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
1258 			resp.flags |=
1259 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1260 
1261 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1262 
1263 		if (MLX5_CAP_GEN_2(mdev, dp_ordering_force) &&
1264 		    (MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc) ||
1265 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc) ||
1266 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc) ||
1267 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud) ||
1268 		    MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc)))
1269 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP;
1270 	}
1271 
1272 	if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1273 		resp.response_length += sizeof(resp.sw_parsing_caps);
1274 		if (MLX5_CAP_ETH(mdev, swp)) {
1275 			resp.sw_parsing_caps.sw_parsing_offloads |=
1276 				MLX5_IB_SW_PARSING;
1277 
1278 			if (MLX5_CAP_ETH(mdev, swp_csum))
1279 				resp.sw_parsing_caps.sw_parsing_offloads |=
1280 					MLX5_IB_SW_PARSING_CSUM;
1281 
1282 			if (MLX5_CAP_ETH(mdev, swp_lso))
1283 				resp.sw_parsing_caps.sw_parsing_offloads |=
1284 					MLX5_IB_SW_PARSING_LSO;
1285 
1286 			if (resp.sw_parsing_caps.sw_parsing_offloads)
1287 				resp.sw_parsing_caps.supported_qpts =
1288 					BIT(IB_QPT_RAW_PACKET);
1289 		}
1290 	}
1291 
1292 	if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1293 	    raw_support) {
1294 		resp.response_length += sizeof(resp.striding_rq_caps);
1295 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
1296 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1297 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1298 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1299 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1300 			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1301 				resp.striding_rq_caps
1302 					.min_single_wqe_log_num_of_strides =
1303 					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1304 			else
1305 				resp.striding_rq_caps
1306 					.min_single_wqe_log_num_of_strides =
1307 					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1308 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1309 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1310 			resp.striding_rq_caps.supported_qpts =
1311 				BIT(IB_QPT_RAW_PACKET);
1312 		}
1313 	}
1314 
1315 	if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1316 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
1317 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1318 			resp.tunnel_offloads_caps |=
1319 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1320 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1321 			resp.tunnel_offloads_caps |=
1322 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1323 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1324 			resp.tunnel_offloads_caps |=
1325 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
1326 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1327 			resp.tunnel_offloads_caps |=
1328 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1329 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1330 			resp.tunnel_offloads_caps |=
1331 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1332 	}
1333 
1334 	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
1335 		resp.response_length += sizeof(resp.dci_streams_caps);
1336 
1337 		resp.dci_streams_caps.max_log_num_concurent =
1338 			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1339 
1340 		resp.dci_streams_caps.max_log_num_errored =
1341 			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1342 	}
1343 
1344 	if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
1345 		resp.response_length += sizeof(resp.reserved);
1346 
1347 	if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
1348 		struct mlx5_eswitch *esw = mdev->priv.eswitch;
1349 
1350 		resp.response_length += sizeof(resp.reg_c0);
1351 
1352 		if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
1353 		    mlx5_eswitch_vport_match_metadata_enabled(esw))
1354 			fill_esw_mgr_reg_c0(mdev, &resp);
1355 	}
1356 
1357 	if (uhw_outlen) {
1358 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1359 
1360 		if (err)
1361 			return err;
1362 	}
1363 
1364 	return 0;
1365 }
1366 
1367 static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1368 				   u8 *ib_width)
1369 {
1370 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1371 
1372 	if (active_width & MLX5_PTYS_WIDTH_1X)
1373 		*ib_width = IB_WIDTH_1X;
1374 	else if (active_width & MLX5_PTYS_WIDTH_2X)
1375 		*ib_width = IB_WIDTH_2X;
1376 	else if (active_width & MLX5_PTYS_WIDTH_4X)
1377 		*ib_width = IB_WIDTH_4X;
1378 	else if (active_width & MLX5_PTYS_WIDTH_8X)
1379 		*ib_width = IB_WIDTH_8X;
1380 	else if (active_width & MLX5_PTYS_WIDTH_12X)
1381 		*ib_width = IB_WIDTH_12X;
1382 	else {
1383 		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1384 			    active_width);
1385 		*ib_width = IB_WIDTH_4X;
1386 	}
1389 }
1390 
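/* Convert an MTU in bytes to the IB enumeration (IB_MTU_256..IB_MTU_4096). */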
1391 static int mlx5_mtu_to_ib_mtu(int mtu)
1392 {
1393 	switch (mtu) {
1394 	case 256: return 1;
1395 	case 512: return 2;
1396 	case 1024: return 3;
1397 	case 2048: return 4;
1398 	case 4096: return 5;
1399 	default:
1400 		pr_warn("invalid mtu\n");
1401 		return -1;
1402 	}
1403 }
1404 
1405 enum ib_max_vl_num {
1406 	__IB_MAX_VL_0		= 1,
1407 	__IB_MAX_VL_0_1		= 2,
1408 	__IB_MAX_VL_0_3		= 3,
1409 	__IB_MAX_VL_0_7		= 4,
1410 	__IB_MAX_VL_0_14	= 5,
1411 };
1412 
1413 enum mlx5_vl_hw_cap {
1414 	MLX5_VL_HW_0	= 1,
1415 	MLX5_VL_HW_0_1	= 2,
1416 	MLX5_VL_HW_0_2	= 3,
1417 	MLX5_VL_HW_0_3	= 4,
1418 	MLX5_VL_HW_0_4	= 5,
1419 	MLX5_VL_HW_0_5	= 6,
1420 	MLX5_VL_HW_0_6	= 7,
1421 	MLX5_VL_HW_0_7	= 8,
1422 	MLX5_VL_HW_0_14	= 15
1423 };
1424 
1425 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1426 				u8 *max_vl_num)
1427 {
1428 	switch (vl_hw_cap) {
1429 	case MLX5_VL_HW_0:
1430 		*max_vl_num = __IB_MAX_VL_0;
1431 		break;
1432 	case MLX5_VL_HW_0_1:
1433 		*max_vl_num = __IB_MAX_VL_0_1;
1434 		break;
1435 	case MLX5_VL_HW_0_3:
1436 		*max_vl_num = __IB_MAX_VL_0_3;
1437 		break;
1438 	case MLX5_VL_HW_0_7:
1439 		*max_vl_num = __IB_MAX_VL_0_7;
1440 		break;
1441 	case MLX5_VL_HW_0_14:
1442 		*max_vl_num = __IB_MAX_VL_0_14;
1443 		break;
1444 
1445 	default:
1446 		return -EINVAL;
1447 	}
1448 
1449 	return 0;
1450 }
1451 
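/*
 * Query an IB link-layer port: LIDs, state and capability masks come
 * from the HCA vport context, speed and width from PTYS, and MTU/VL
 * limits from the port registers. For an SMI device the plane index is
 * first translated to the native port number.
 */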
1452 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1453 			       struct ib_port_attr *props)
1454 {
1455 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1456 	struct mlx5_core_dev *mdev = dev->mdev;
1457 	struct mlx5_hca_vport_context *rep;
1458 	u8 vl_hw_cap, plane_index = 0;
1459 	u16 max_mtu;
1460 	u16 oper_mtu;
1461 	int err;
1462 	u16 ib_link_width_oper;
1463 
1464 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1465 	if (!rep) {
1466 		err = -ENOMEM;
1467 		goto out;
1468 	}
1469 
1470 	/* props being zeroed by the caller, avoid zeroing it here */
1471 
1472 	if (ibdev->type == RDMA_DEVICE_TYPE_SMI) {
1473 		plane_index = port;
1474 		port = smi_to_native_portnum(dev, port);
1475 	}
1476 
1477 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1478 	if (err)
1479 		goto out;
1480 
1481 	props->lid		= rep->lid;
1482 	props->lmc		= rep->lmc;
1483 	props->sm_lid		= rep->sm_lid;
1484 	props->sm_sl		= rep->sm_sl;
1485 	props->state		= rep->vport_state;
1486 	props->phys_state	= rep->port_physical_state;
1487 
1488 	props->port_cap_flags = rep->cap_mask1;
1489 	if (dev->num_plane) {
1490 		props->port_cap_flags |= IB_PORT_SM_DISABLED;
1491 		props->port_cap_flags &= ~IB_PORT_SM;
1492 	} else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
1493 		props->port_cap_flags &= ~IB_PORT_CM_SUP;
1494 
1495 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1496 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1497 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1498 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1499 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1500 	props->subnet_timeout	= rep->subnet_timeout;
1501 	props->init_type_reply	= rep->init_type_reply;
1502 
1503 	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1504 		props->port_cap_flags2 = rep->cap_mask2;
1505 
1506 	err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1507 				      &props->active_speed, port, plane_index);
1508 	if (err)
1509 		goto out;
1510 
1511 	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1512 
1513 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1514 
1515 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1516 
1517 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1518 
1519 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1520 
1521 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1522 	if (err)
1523 		goto out;
1524 
1525 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1526 				   &props->max_vl_num);
1527 out:
1528 	kfree(rep);
1529 	return err;
1530 }
1531 
1532 int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1533 		       struct ib_port_attr *props)
1534 {
1535 	unsigned int count;
1536 	int ret;
1537 
1538 	switch (mlx5_get_vport_access_method(ibdev)) {
1539 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1540 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1541 		break;
1542 
1543 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1544 		ret = mlx5_query_hca_port(ibdev, port, props);
1545 		break;
1546 
1547 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1548 		ret = mlx5_query_port_roce(ibdev, port, props);
1549 		break;
1550 
1551 	default:
1552 		ret = -EINVAL;
1553 	}
1554 
1555 	if (!ret && props) {
1556 		struct mlx5_ib_dev *dev = to_mdev(ibdev);
1557 		struct mlx5_core_dev *mdev;
1558 		bool put_mdev = true;
1559 
1560 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1561 		if (!mdev) {
1562 			/* If the port isn't affiliated yet query the master.
1563 			 * The master and slave will have the same values.
1564 			 */
1565 			mdev = dev->mdev;
1566 			port = 1;
1567 			put_mdev = false;
1568 		}
1569 		count = mlx5_core_reserved_gids_count(mdev);
1570 		if (put_mdev)
1571 			mlx5_ib_put_native_port_mdev(dev, port);
1572 		props->gid_tbl_len -= count;
1573 	}
1574 	return ret;
1575 }
1576 
1577 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1578 				  struct ib_port_attr *props)
1579 {
1580 	return mlx5_query_port_roce(ibdev, port, props);
1581 }
1582 
1583 static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1584 				  u16 *pkey)
1585 {
1586 	/* Default special Pkey for representor device port as per the
1587 	 * IB specification 1.3 section 10.9.1.2.
1588 	 */
1589 	*pkey = 0xffff;
1590 	return 0;
1591 }
1592 
1593 static int mlx5_ib_query_port_speed_from_port(struct mlx5_ib_dev *dev,
1594 					      u32 port_num, u64 *speed)
1595 {
1596 	struct ib_port_speed_info speed_info;
1597 	struct ib_port_attr attr = {};
1598 	int err;
1599 
1600 	err = mlx5_ib_query_port(&dev->ib_dev, port_num, &attr);
1601 	if (err)
1602 		return err;
1603 
1604 	if (attr.state == IB_PORT_DOWN) {
1605 		*speed = 0;
1606 		return 0;
1607 	}
1608 
1609 	err = ib_port_attr_to_speed_info(&attr, &speed_info);
1610 	if (err)
1611 		return err;
1612 
1613 	*speed = speed_info.rate;
1614 	return 0;
1615 }
1616 
1617 static int mlx5_ib_query_port_speed_from_vport(struct mlx5_core_dev *mdev,
1618 					       u8 op_mod, u16 vport,
1619 					       u8 other_vport, u64 *speed,
1620 					       struct mlx5_ib_dev *dev,
1621 					       u32 port_num)
1622 {
1623 	u32 max_tx_speed;
1624 	int err;
1625 
1626 	err = mlx5_query_vport_max_tx_speed(mdev, op_mod, vport, other_vport,
1627 					    &max_tx_speed);
1628 	if (err)
1629 		return err;
1630 
1631 	if (max_tx_speed == 0)
1632 		/* Value 0 indicates field not supported, fallback */
1633 		return mlx5_ib_query_port_speed_from_port(dev, port_num,
1634 							  speed);
1635 
1636 	*speed = max_tx_speed;
1637 	return 0;
1638 }
1639 
1640 static int mlx5_ib_query_port_speed_from_bond(struct mlx5_ib_dev *dev,
1641 					      u32 port_num, u64 *speed)
1642 {
1643 	struct mlx5_core_dev *mdev = dev->mdev;
1644 	u32 bond_speed;
1645 	int err;
1646 
1647 	err = mlx5_lag_query_bond_speed(mdev, &bond_speed);
1648 	if (err)
1649 		return err;
1650 
1651 	*speed = bond_speed / MLX5_MAX_TX_SPEED_UNIT;
1652 
1653 	return 0;
1654 }
1655 
1656 static int mlx5_ib_query_port_speed_non_rep(struct mlx5_ib_dev *dev,
1657 					    u32 port_num, u64 *speed)
1658 {
1659 	u16 op_mod = MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT;
1660 
1661 	if (mlx5_lag_is_roce(dev->mdev))
1662 		return mlx5_ib_query_port_speed_from_bond(dev, port_num,
1663 							  speed);
1664 
1665 	return mlx5_ib_query_port_speed_from_vport(dev->mdev, op_mod, 0, false,
1666 						   speed, dev, port_num);
1667 }
1668 
1669 static int mlx5_ib_query_port_speed_rep(struct mlx5_ib_dev *dev, u32 port_num,
1670 					u64 *speed)
1671 {
1672 	struct mlx5_eswitch_rep *rep;
1673 	struct mlx5_core_dev *mdev;
1674 	u16 op_mod;
1675 
1676 	if (!dev->port[port_num - 1].rep) {
1677 		mlx5_ib_warn(dev, "Representor doesn't exist for port %u\n",
1678 			     port_num);
1679 		return -EINVAL;
1680 	}
1681 
1682 	rep = dev->port[port_num - 1].rep;
1683 	mdev = mlx5_eswitch_get_core_dev(rep->esw);
1684 	if (!mdev)
1685 		return -ENODEV;
1686 
1687 	if (rep->vport == MLX5_VPORT_UPLINK) {
1688 		if (mlx5_lag_is_sriov(mdev))
1689 			return mlx5_ib_query_port_speed_from_bond(dev,
1690 								  port_num,
1691 								  speed);
1692 
1693 		return mlx5_ib_query_port_speed_from_port(dev, port_num,
1694 							  speed);
1695 	}
1696 
1697 	op_mod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
1698 	return mlx5_ib_query_port_speed_from_vport(dev->mdev, op_mod,
1699 						   rep->vport, true, speed, dev,
1700 						   port_num);
1701 }
1702 
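/*
 * Report the port speed: IB and multiport-RoCE ports use the standard
 * port attributes, native Ethernet ports query the vport max TX speed
 * (or the bond speed under RoCE LAG), and switchdev representors go
 * through their e-switch vport.
 */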
1703 int mlx5_ib_query_port_speed(struct ib_device *ibdev, u32 port_num, u64 *speed)
1704 {
1705 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1706 
1707 	if (mlx5_ib_port_link_layer(ibdev, port_num) ==
1708 	    IB_LINK_LAYER_INFINIBAND || mlx5_core_mp_enabled(dev->mdev))
1709 		return mlx5_ib_query_port_speed_from_port(dev, port_num, speed);
1710 	else if (!dev->is_rep)
1711 		return mlx5_ib_query_port_speed_non_rep(dev, port_num, speed);
1712 	else
1713 		return mlx5_ib_query_port_speed_rep(dev, port_num, speed);
1714 }
1715 
1716 static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1717 			     union ib_gid *gid)
1718 {
1719 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1720 	struct mlx5_core_dev *mdev = dev->mdev;
1721 
1722 	switch (mlx5_get_vport_access_method(ibdev)) {
1723 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1724 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1725 
1726 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1727 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1728 
1729 	default:
1730 		return -EINVAL;
1731 	}
1732 
1733 }
1734 
1735 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1736 				   u16 index, u16 *pkey)
1737 {
1738 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1739 	struct mlx5_core_dev *mdev;
1740 	bool put_mdev = true;
1741 	u32 mdev_port_num;
1742 	int err;
1743 
1744 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1745 	if (!mdev) {
1746 		/* The port isn't affiliated yet, get the PKey from the master
1747 		 * port. For RoCE the PKey tables will be the same.
1748 		 */
1749 		put_mdev = false;
1750 		mdev = dev->mdev;
1751 		mdev_port_num = 1;
1752 	}
1753 
1754 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1755 					index, pkey);
1756 	if (put_mdev)
1757 		mlx5_ib_put_native_port_mdev(dev, port);
1758 
1759 	return err;
1760 }
1761 
1762 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1763 			      u16 *pkey)
1764 {
1765 	switch (mlx5_get_vport_access_method(ibdev)) {
1766 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1767 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1768 
1769 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1770 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1771 		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1772 	default:
1773 		return -EINVAL;
1774 	}
1775 }
1776 
1777 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1778 				 struct ib_device_modify *props)
1779 {
1780 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1781 	struct mlx5_reg_node_desc in;
1782 	struct mlx5_reg_node_desc out;
1783 	int err;
1784 
1785 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1786 		return -EOPNOTSUPP;
1787 
1788 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1789 		return 0;
1790 
1791 	/*
1792 	 * If possible, pass the node desc to FW so it can generate
1793 	 * a trap 144 (local change). If the cmd fails, just ignore it.
1794 	 */
1795 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1796 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1797 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1798 	if (err)
1799 		return err;
1800 
1801 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1802 
1803 	return err;
1804 }
1805 
1806 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1807 				u32 value)
1808 {
1809 	struct mlx5_hca_vport_context ctx = {};
1810 	struct mlx5_core_dev *mdev;
1811 	u32 mdev_port_num;
1812 	int err;
1813 
1814 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1815 	if (!mdev)
1816 		return -ENODEV;
1817 
1818 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1819 	if (err)
1820 		goto out;
1821 
1822 	if (~ctx.cap_mask1_perm & mask) {
1823 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X, but only 0x%X changes are supported\n",
1824 			     mask, ctx.cap_mask1_perm);
1825 		err = -EINVAL;
1826 		goto out;
1827 	}
1828 
1829 	ctx.cap_mask1 = value;
1830 	ctx.cap_mask1_perm = mask;
1831 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1832 						 0, &ctx);
1833 
1834 out:
1835 	mlx5_ib_put_native_port_mdev(dev, port_num);
1836 
1837 	return err;
1838 }
1839 
1840 static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1841 			       struct ib_port_modify *props)
1842 {
1843 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1844 	struct ib_port_attr attr;
1845 	u32 tmp;
1846 	int err;
1847 	u32 change_mask;
1848 	u32 value;
1849 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1850 		      IB_LINK_LAYER_INFINIBAND);
1851 
1852 	/* CM layer calls ib_modify_port() regardless of the link layer. For
1853 	 * Ethernet ports, qkey violation counters and port capabilities are meaningless.
1854 	 */
1855 	if (!is_ib)
1856 		return 0;
1857 
1858 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1859 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1860 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1861 		return set_port_caps_atomic(dev, port, change_mask, value);
1862 	}
1863 
1864 	mutex_lock(&dev->cap_mask_mutex);
1865 
1866 	err = ib_query_port(ibdev, port, &attr);
1867 	if (err)
1868 		goto out;
1869 
1870 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1871 		~props->clr_port_cap_mask;
1872 
1873 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1874 
1875 out:
1876 	mutex_unlock(&dev->cap_mask_mutex);
1877 	return err;
1878 }
1879 
1880 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1881 {
1882 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1883 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1884 }
1885 
1886 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1887 {
1888 	/* A large page size with non-4K UAR support might limit the dynamic size */
1889 	if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1890 		return MLX5_MIN_DYN_BFREGS;
1891 
1892 	return MLX5_MAX_DYN_BFREGS;
1893 }
1894 
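/*
 * Validate the bfreg count requested by userspace and derive the static and
 * dynamic system-page layout; req->total_num_bfregs is rounded up to a whole
 * number of system pages as a side effect.
 */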
1895 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1896 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1897 			     struct mlx5_bfreg_info *bfregi)
1898 {
1899 	int uars_per_sys_page;
1900 	int bfregs_per_sys_page;
1901 	int ref_bfregs = req->total_num_bfregs;
1902 
1903 	if (req->total_num_bfregs == 0)
1904 		return -EINVAL;
1905 
1906 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1907 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1908 
1909 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1910 		return -ENOMEM;
1911 
1912 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1913 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1914 	/* This holds the required static allocation asked by the user */
1915 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1916 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1917 		return -EINVAL;
1918 
1919 	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1920 	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1921 	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1922 	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1923 
1924 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1925 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1926 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1927 		    req->total_num_bfregs, bfregi->total_num_bfregs,
1928 		    bfregi->num_sys_pages);
1929 
1930 	return 0;
1931 }
1932 
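/*
 * Allocate a UAR for every static system page; dynamic system pages are only
 * marked invalid here and receive their UARs on demand at mmap time.
 */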
1933 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1934 {
1935 	struct mlx5_bfreg_info *bfregi;
1936 	int err;
1937 	int i;
1938 
1939 	bfregi = &context->bfregi;
1940 	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1941 		err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1942 					 context->devx_uid);
1943 		if (err)
1944 			goto error;
1945 
1946 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1947 	}
1948 
1949 	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1950 		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1951 
1952 	return 0;
1953 
1954 error:
1955 	for (--i; i >= 0; i--)
1956 		if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1957 					 context->devx_uid))
1958 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1959 
1960 	return err;
1961 }
1962 
1963 static void deallocate_uars(struct mlx5_ib_dev *dev,
1964 			    struct mlx5_ib_ucontext *context)
1965 {
1966 	struct mlx5_bfreg_info *bfregi;
1967 	int i;
1968 
1969 	bfregi = &context->bfregi;
1970 	for (i = 0; i < bfregi->num_sys_pages; i++)
1971 		if (i < bfregi->num_static_sys_pages ||
1972 		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1973 			mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1974 					     context->devx_uid);
1975 }
1976 
1977 static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
1978 				struct mlx5_core_dev *slave,
1979 				struct mlx5_ib_lb_state *lb_state)
1980 {
1981 	int err;
1982 
1983 	err = mlx5_nic_vport_update_local_lb(master, true);
1984 	if (err)
1985 		return err;
1986 
1987 	err = mlx5_nic_vport_update_local_lb(slave, true);
1988 	if (err)
1989 		goto out;
1990 
1991 	lb_state->force_enable = true;
1992 	return 0;
1993 
1994 out:
1995 	mlx5_nic_vport_update_local_lb(master, false);
1996 	return err;
1997 }
1998 
1999 static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
2000 				  struct mlx5_core_dev *slave,
2001 				  struct mlx5_ib_lb_state *lb_state)
2002 {
2003 	mlx5_nic_vport_update_local_lb(slave, false);
2004 	mlx5_nic_vport_update_local_lb(master, false);
2005 
2006 	lb_state->force_enable = false;
2007 }
2008 
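/*
 * Reference-counted enablement of local loopback: it is switched on when a
 * second transport-domain user or the first loopback QP appears, and is left
 * untouched while force_enable is set by the multiport flow.
 */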
2009 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
2010 {
2011 	int err = 0;
2012 
2013 	if (dev->lb.force_enable)
2014 		return 0;
2015 
2016 	mutex_lock(&dev->lb.mutex);
2017 	if (td)
2018 		dev->lb.user_td++;
2019 	if (qp)
2020 		dev->lb.qps++;
2021 
2022 	if (dev->lb.user_td == 2 ||
2023 	    dev->lb.qps == 1) {
2024 		if (!dev->lb.enabled) {
2025 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
2026 			dev->lb.enabled = true;
2027 		}
2028 	}
2029 
2030 	mutex_unlock(&dev->lb.mutex);
2031 
2032 	return err;
2033 }
2034 
2035 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
2036 {
2037 	if (dev->lb.force_enable)
2038 		return;
2039 
2040 	mutex_lock(&dev->lb.mutex);
2041 	if (td)
2042 		dev->lb.user_td--;
2043 	if (qp)
2044 		dev->lb.qps--;
2045 
2046 	if (dev->lb.user_td == 1 &&
2047 	    dev->lb.qps == 0) {
2048 		if (dev->lb.enabled) {
2049 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
2050 			dev->lb.enabled = false;
2051 		}
2052 	}
2053 
2054 	mutex_unlock(&dev->lb.mutex);
2055 }
2056 
2057 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
2058 					  u16 uid)
2059 {
2060 	int err;
2061 
2062 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
2063 		return 0;
2064 
2065 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
2066 	if (err)
2067 		return err;
2068 
2069 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
2070 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
2071 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
2072 		return err;
2073 
2074 	return mlx5_ib_enable_lb(dev, true, false);
2075 }
2076 
2077 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
2078 					     u16 uid)
2079 {
2080 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
2081 		return;
2082 
2083 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
2084 
2085 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
2086 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
2087 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
2088 		return;
2089 
2090 	mlx5_ib_disable_lb(dev, true, false);
2091 }
2092 
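/* Fill the alloc_ucontext response from the device caps and context state. */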
2093 static int set_ucontext_resp(struct ib_ucontext *uctx,
2094 			     struct mlx5_ib_alloc_ucontext_resp *resp)
2095 {
2096 	struct ib_device *ibdev = uctx->device;
2097 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2098 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
2099 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2100 
2101 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
2102 		resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
2103 		resp->comp_mask |=
2104 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
2105 	}
2106 
2107 	resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
2108 	if (mlx5_wc_support_get(dev->mdev))
2109 		resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
2110 						      log_bf_reg_size);
2111 	resp->cache_line_size = cache_line_size();
2112 	resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
2113 	resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
2114 	resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
2115 	resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
2116 	resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
2117 	resp->cqe_version = context->cqe_version;
2118 	resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2119 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
2120 	resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2121 					MLX5_CAP_GEN(dev->mdev,
2122 						     num_of_uars_per_page) : 1;
2123 	resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
2124 			bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
2125 	resp->num_ports = dev->num_ports;
2126 	resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
2127 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
2128 
2129 	if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
2130 		mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
2131 		resp->eth_min_inline++;
2132 	}
2133 
2134 	if (dev->mdev->clock_info)
2135 		resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
2136 
2137 	/*
2138 	 * We don't want to expose information from the PCI BAR that is located
2139 	 * beyond the first 4096 bytes, so if the arch only supports larger
2140 	 * pages, let's pretend we don't support reading the HCA's core clock.
2141 	 * This is also enforced by the mmap function.
2142 	 */
2143 	if (PAGE_SIZE <= 4096) {
2144 		resp->comp_mask |=
2145 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
2146 		resp->hca_core_clock_offset =
2147 			offsetof(struct mlx5_init_seg,
2148 				 internal_timer_h) % PAGE_SIZE;
2149 	}
2150 
2151 	if (MLX5_CAP_GEN(dev->mdev, ece_support))
2152 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
2153 
2154 	if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
2155 	    rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
2156 	    rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
2157 		resp->comp_mask |=
2158 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
2159 
2160 	resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
2161 
2162 	if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
2163 		resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
2164 
2165 	resp->comp_mask |=
2166 		MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
2167 
2168 	return 0;
2169 }
2170 
2171 static bool uctx_rdma_ctrl_is_enabled(u64 enabled_caps)
2172 {
2173 	return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL) ||
2174 	       UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
2175 }
2176 
2177 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
2178 				  struct ib_udata *udata)
2179 {
2180 	struct ib_device *ibdev = uctx->device;
2181 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2182 	struct mlx5_ib_alloc_ucontext_req_v2 req;
2183 	struct mlx5_ib_alloc_ucontext_resp resp = {};
2184 	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
2185 	struct mlx5_bfreg_info *bfregi;
2186 	int ver;
2187 	int err;
2188 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
2189 				     max_cqe_version);
2190 	bool lib_uar_4k;
2191 	bool lib_uar_dyn;
2192 
2193 	if (!dev->ib_active)
2194 		return -EAGAIN;
2195 
2196 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
2197 		ver = 0;
2198 	else if (udata->inlen >= min_req_v2)
2199 		ver = 2;
2200 	else
2201 		return -EINVAL;
2202 
2203 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
2204 	if (err)
2205 		return err;
2206 
2207 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
2208 		return -EOPNOTSUPP;
2209 
2210 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
2211 		return -EOPNOTSUPP;
2212 
2213 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
2214 				    MLX5_NON_FP_BFREGS_PER_UAR);
2215 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
2216 		return -EINVAL;
2217 
2218 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
2219 		err = mlx5_ib_devx_create(dev, true, uctx->enabled_caps);
2220 		if (err < 0)
2221 			goto out_ctx;
2222 		context->devx_uid = err;
2223 
2224 		if (uctx_rdma_ctrl_is_enabled(uctx->enabled_caps)) {
2225 			err = mlx5_cmd_add_privileged_uid(dev->mdev,
2226 							  context->devx_uid);
2227 			if (err)
2228 				goto out_devx;
2229 		}
2230 	}
2231 
2232 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
2233 	lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
2234 	bfregi = &context->bfregi;
2235 
2236 	if (lib_uar_dyn) {
2237 		bfregi->lib_uar_dyn = lib_uar_dyn;
2238 		goto uar_done;
2239 	}
2240 
2241 	/* updates req->total_num_bfregs */
2242 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
2243 	if (err)
2244 		goto out_ucap;
2245 
2246 	mutex_init(&bfregi->lock);
2247 	bfregi->lib_uar_4k = lib_uar_4k;
2248 	bfregi->count = kzalloc_objs(*bfregi->count, bfregi->total_num_bfregs);
2249 	if (!bfregi->count) {
2250 		err = -ENOMEM;
2251 		goto out_ucap;
2252 	}
2253 
2254 	bfregi->sys_pages =
2255 		kzalloc_objs(*bfregi->sys_pages, bfregi->num_sys_pages);
2256 	if (!bfregi->sys_pages) {
2257 		err = -ENOMEM;
2258 		goto out_count;
2259 	}
2260 
2261 	err = allocate_uars(dev, context);
2262 	if (err)
2263 		goto out_sys_pages;
2264 
2265 uar_done:
2266 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
2267 					     context->devx_uid);
2268 	if (err)
2269 		goto out_uars;
2270 
2271 	INIT_LIST_HEAD(&context->db_page_list);
2272 	mutex_init(&context->db_page_mutex);
2273 
2274 	context->cqe_version = min_t(__u8,
2275 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
2276 				 req.max_cqe_version);
2277 
2278 	err = set_ucontext_resp(uctx, &resp);
2279 	if (err)
2280 		goto out_mdev;
2281 
2282 	resp.response_length = min(udata->outlen, sizeof(resp));
2283 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
2284 	if (err)
2285 		goto out_mdev;
2286 
2287 	bfregi->ver = ver;
2288 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
2289 	context->lib_caps = req.lib_caps;
2290 	print_lib_caps(dev, context->lib_caps);
2291 
2292 	if (mlx5_ib_lag_should_assign_affinity(dev)) {
2293 		u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
2294 
2295 		atomic_set(&context->tx_port_affinity,
2296 			   atomic_add_return(
2297 				   1, &dev->port[port].roce.tx_port_affinity));
2298 	}
2299 
2300 	return 0;
2301 
2302 out_mdev:
2303 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2304 
2305 out_uars:
2306 	deallocate_uars(dev, context);
2307 
2308 out_sys_pages:
2309 	kfree(bfregi->sys_pages);
2310 
2311 out_count:
2312 	kfree(bfregi->count);
2313 
2314 out_ucap:
2315 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX &&
2316 	    uctx_rdma_ctrl_is_enabled(uctx->enabled_caps))
2317 		mlx5_cmd_remove_privileged_uid(dev->mdev, context->devx_uid);
2318 
2319 out_devx:
2320 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
2321 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2322 
2323 out_ctx:
2324 	return err;
2325 }
2326 
2327 static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
2328 				  struct uverbs_attr_bundle *attrs)
2329 {
2330 	struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
2331 	int ret;
2332 
2333 	ret = set_ucontext_resp(ibcontext, &uctx_resp);
2334 	if (ret)
2335 		return ret;
2336 
2337 	uctx_resp.response_length =
2338 		min_t(size_t,
2339 		      uverbs_attr_get_len(attrs,
2340 				MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
2341 		      sizeof(uctx_resp));
2342 
2343 	ret = uverbs_copy_to_struct_or_zero(attrs,
2344 					MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
2345 					&uctx_resp,
2346 					sizeof(uctx_resp));
2347 	return ret;
2348 }
2349 
2350 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
2351 {
2352 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2353 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2354 	struct mlx5_bfreg_info *bfregi;
2355 
2356 	bfregi = &context->bfregi;
2357 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2358 
2359 	deallocate_uars(dev, context);
2360 	kfree(bfregi->sys_pages);
2361 	kfree(bfregi->count);
2362 
2363 	if (context->devx_uid) {
2364 		if (uctx_rdma_ctrl_is_enabled(ibcontext->enabled_caps))
2365 			mlx5_cmd_remove_privileged_uid(dev->mdev,
2366 						       context->devx_uid);
2367 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2368 	}
2369 }
2370 
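/*
 * Translate a UAR index to the PFN of the system page backing it; with 4K
 * UAR support, several firmware UARs share one kernel page.
 */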
2371 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2372 				 int uar_idx)
2373 {
2374 	int fw_uars_per_page;
2375 
2376 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2377 
2378 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2379 }
2380 
2381 static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2382 				 int uar_idx)
2383 {
2384 	unsigned int fw_uars_per_page;
2385 
2386 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2387 				MLX5_UARS_IN_PAGE : 1;
2388 
2389 	return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2390 }
2391 
2392 static int get_command(unsigned long offset)
2393 {
2394 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2395 }
2396 
2397 static int get_arg(unsigned long offset)
2398 {
2399 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2400 }
2401 
2402 static int get_index(unsigned long offset)
2403 {
2404 	return get_arg(offset);
2405 }
2406 
2407 /* The index resides in an extra byte to enable values larger than 255 */
2408 static int get_extended_index(unsigned long offset)
2409 {
2410 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2411 }
2412 
2414 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2415 {
2416 }
2417 
2418 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2419 {
2420 	switch (cmd) {
2421 	case MLX5_IB_MMAP_WC_PAGE:
2422 		return "WC";
2423 	case MLX5_IB_MMAP_REGULAR_PAGE:
2424 		return "best effort WC";
2425 	case MLX5_IB_MMAP_NC_PAGE:
2426 		return "NC";
2427 	case MLX5_IB_MMAP_DEVICE_MEM:
2428 		return "Device Memory";
2429 	default:
2430 		return "Unknown";
2431 	}
2432 }
2433 
2434 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2435 					struct vm_area_struct *vma,
2436 					struct mlx5_ib_ucontext *context)
2437 {
2438 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2439 	    !(vma->vm_flags & VM_SHARED))
2440 		return -EINVAL;
2441 
2442 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2443 		return -EOPNOTSUPP;
2444 
2445 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2446 		return -EPERM;
2447 	vm_flags_clear(vma, VM_MAYWRITE);
2448 
2449 	if (!dev->mdev->clock_info)
2450 		return -EOPNOTSUPP;
2451 
2452 	return vm_insert_page(vma, vma->vm_start,
2453 			      virt_to_page(dev->mdev->clock_info));
2454 }
2455 
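/* Return the index of the memory BAR that contains @pa, or -1 if none does. */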
2456 static int phys_addr_to_bar(struct pci_dev *pdev, phys_addr_t pa)
2457 {
2458 	resource_size_t start, end;
2459 	int bar;
2460 
2461 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
2462 		/* Skip BARs not present or not memory-mapped */
2463 		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
2464 			continue;
2465 
2466 		start = pci_resource_start(pdev, bar);
2467 		end = pci_resource_end(pdev, bar);
2468 
2469 		if (!start || !end)
2470 			continue;
2471 
2472 		if (pa >= start && pa <= end)
2473 			return bar;
2474 	}
2475 
2476 	return -1;
2477 }
2478 
2479 static int mlx5_ib_mmap_get_pfns(struct rdma_user_mmap_entry *entry,
2480 				 struct phys_vec *phys_vec,
2481 				 struct p2pdma_provider **provider)
2482 {
2483 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2484 	struct pci_dev *pdev = to_mdev(entry->ucontext->device)->mdev->pdev;
2485 	int bar;
2486 
2487 	phys_vec->paddr = mentry->address;
2488 	phys_vec->len = entry->npages * PAGE_SIZE;
2489 
2490 	bar = phys_addr_to_bar(pdev, phys_vec->paddr);
2491 	if (bar < 0)
2492 		return -EINVAL;
2493 
2494 	*provider = pcim_p2pdma_provider(pdev, bar);
2495 	/* If the kernel was not compiled with CONFIG_PCI_P2PDMA, the
2496 	 * functionality is not supported.
2497 	 */
2498 	if (!*provider)
2499 		return -EOPNOTSUPP;
2500 
2501 	return 0;
2502 }
2503 
2504 static struct rdma_user_mmap_entry *
2505 mlx5_ib_pgoff_to_mmap_entry(struct ib_ucontext *ucontext, off_t pg_off)
2506 {
2507 	unsigned long entry_pgoff;
2508 	unsigned long idx;
2509 	u8 command;
2510 
2511 	pg_off = pg_off >> PAGE_SHIFT;
2512 	command = get_command(pg_off);
2513 	idx = get_extended_index(pg_off);
2514 
2515 	entry_pgoff = command << 16 | idx;
2516 
2517 	return rdma_user_mmap_entry_get_pgoff(ucontext, entry_pgoff);
2518 }
2519 
2520 static void mlx5_ib_free_var_mmap_entry(struct mlx5_user_mmap_entry *mentry,
2521 					struct mlx5_var_region *var_region)
2522 {
2523 	mutex_lock(&var_region->bitmap_lock);
2524 	clear_bit(mentry->page_idx, var_region->bitmap);
2525 	mutex_unlock(&var_region->bitmap_lock);
2526 	kfree(mentry);
2527 }
2528 
2529 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
2530 {
2531 	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2532 	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2533 	struct mlx5_var_table *var_table = &dev->var_table;
2534 	struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
2535 	struct mlx5_var_region *var_region;
2536 
2537 	switch (mentry->mmap_flag) {
2538 	case MLX5_IB_MMAP_TYPE_MEMIC:
2539 	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
2540 		mlx5_ib_dm_mmap_free(dev, mentry);
2541 		break;
2542 	case MLX5_IB_MMAP_TYPE_VAR:
2543 		var_region = &var_table->var_region;
2544 		mlx5_ib_free_var_mmap_entry(mentry, var_region);
2545 		break;
2546 	case MLX5_IB_MMAP_TYPE_TLP_VAR:
2547 		var_region = &var_table->tlp_var_region;
2548 		mlx5_ib_free_var_mmap_entry(mentry, var_region);
2549 		break;
2550 	case MLX5_IB_MMAP_TYPE_UAR_WC:
2551 	case MLX5_IB_MMAP_TYPE_UAR_NC:
2552 		mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2553 				     context->devx_uid);
2554 		kfree(mentry);
2555 		break;
2556 	default:
2557 		WARN_ON(true);
2558 	}
2559 }
2560 
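/*
 * mmap a single UAR page as WC or NC. For MLX5_IB_MMAP_ALLOC_WC a dynamic
 * UAR is allocated on the fly and recorded in bfregi->sys_pages on success.
 */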
2561 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2562 		    struct vm_area_struct *vma,
2563 		    struct mlx5_ib_ucontext *context)
2564 {
2565 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2566 	int err;
2567 	unsigned long idx;
2568 	phys_addr_t pfn;
2569 	pgprot_t prot;
2570 	u32 bfreg_dyn_idx = 0;
2571 	u32 uar_index;
2572 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2573 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2574 				bfregi->num_static_sys_pages;
2575 
2576 	if (bfregi->lib_uar_dyn)
2577 		return -EINVAL;
2578 
2579 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2580 		return -EINVAL;
2581 
2582 	if (dyn_uar)
2583 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2584 	else
2585 		idx = get_index(vma->vm_pgoff);
2586 
2587 	if (idx >= max_valid_idx) {
2588 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2589 			     idx, max_valid_idx);
2590 		return -EINVAL;
2591 	}
2592 
2593 	switch (cmd) {
2594 	case MLX5_IB_MMAP_WC_PAGE:
2595 	case MLX5_IB_MMAP_ALLOC_WC:
2596 	case MLX5_IB_MMAP_REGULAR_PAGE:
2597 		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
2598 		prot = pgprot_writecombine(vma->vm_page_prot);
2599 		break;
2600 	case MLX5_IB_MMAP_NC_PAGE:
2601 		prot = pgprot_noncached(vma->vm_page_prot);
2602 		break;
2603 	default:
2604 		return -EINVAL;
2605 	}
2606 
2607 	if (dyn_uar) {
2608 		int uars_per_page;
2609 
2610 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2611 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2612 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2613 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2614 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2615 			return -EINVAL;
2616 		}
2617 
2618 		mutex_lock(&bfregi->lock);
2619 		/* Fail if the UAR is already allocated; the first bfreg index
2620 		 * of each page holds its count.
2621 		 */
2622 		if (bfregi->count[bfreg_dyn_idx]) {
2623 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2624 			mutex_unlock(&bfregi->lock);
2625 			return -EINVAL;
2626 		}
2627 
2628 		bfregi->count[bfreg_dyn_idx]++;
2629 		mutex_unlock(&bfregi->lock);
2630 
2631 		err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2632 					 context->devx_uid);
2633 		if (err) {
2634 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2635 			goto free_bfreg;
2636 		}
2637 	} else {
2638 		uar_index = bfregi->sys_pages[idx];
2639 	}
2640 
2641 	pfn = uar_index2pfn(dev, uar_index);
2642 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2643 
2644 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2645 				prot, NULL);
2646 	if (err) {
2647 		mlx5_ib_err(dev,
2648 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2649 			    err, mmap_cmd2str(cmd));
2650 		goto err;
2651 	}
2652 
2653 	if (dyn_uar)
2654 		bfregi->sys_pages[idx] = uar_index;
2655 	return 0;
2656 
2657 err:
2658 	if (!dyn_uar)
2659 		return err;
2660 
2661 	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, context->devx_uid);
2662 
2663 free_bfreg:
2664 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2665 
2666 	return err;
2667 }
2668 
2669 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
2670 {
2671 	unsigned long idx;
2672 	u8 command;
2673 
2674 	command = get_command(vma->vm_pgoff);
2675 	idx = get_extended_index(vma->vm_pgoff);
2676 
2677 	return (command << 16 | idx);
2678 }
2679 
2680 static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2681 			       struct vm_area_struct *vma,
2682 			       struct ib_ucontext *ucontext)
2683 {
2684 	struct mlx5_user_mmap_entry *mentry;
2685 	struct rdma_user_mmap_entry *entry;
2686 	unsigned long pgoff;
2687 	pgprot_t prot;
2688 	phys_addr_t pfn;
2689 	int ret;
2690 
2691 	pgoff = mlx5_vma_to_pgoff(vma);
2692 	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
2693 	if (!entry)
2694 		return -EINVAL;
2695 
2696 	mentry = to_mmmap(entry);
2697 	pfn = (mentry->address >> PAGE_SHIFT);
2698 	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
2699 	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_TLP_VAR ||
2700 	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
2701 		prot = pgprot_noncached(vma->vm_page_prot);
2702 	else
2703 		prot = pgprot_writecombine(vma->vm_page_prot);
2704 	ret = rdma_user_mmap_io(ucontext, vma, pfn,
2705 				entry->npages * PAGE_SIZE,
2706 				prot,
2707 				entry);
2708 	rdma_user_mmap_entry_put(&mentry->rdma_entry);
2709 	return ret;
2710 }
2711 
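/*
 * Rebuild the user-visible mmap offset (command plus extended-index
 * encoding) from the rdma entry's start_pgoff; the inverse of
 * mlx5_vma_to_pgoff().
 */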
2712 static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
2713 {
2714 	u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
2715 	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
2716 
2717 	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
2718 		(index & 0xFF)) << PAGE_SHIFT;
2719 }
2720 
2721 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2722 {
2723 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2724 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2725 	unsigned long command;
2726 	phys_addr_t pfn;
2727 
2728 	command = get_command(vma->vm_pgoff);
2729 	switch (command) {
2730 	case MLX5_IB_MMAP_WC_PAGE:
2731 	case MLX5_IB_MMAP_ALLOC_WC:
2732 		if (!mlx5_wc_support_get(dev->mdev))
2733 			return -EPERM;
2734 		fallthrough;
2735 	case MLX5_IB_MMAP_NC_PAGE:
2736 	case MLX5_IB_MMAP_REGULAR_PAGE:
2737 		return uar_mmap(dev, command, vma, context);
2738 
2739 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2740 		return -ENOSYS;
2741 
2742 	case MLX5_IB_MMAP_CORE_CLOCK:
2743 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2744 			return -EINVAL;
2745 
2746 		if (vma->vm_flags & VM_WRITE)
2747 			return -EPERM;
2748 		vm_flags_clear(vma, VM_MAYWRITE);
2749 
2750 		/* Don't expose information to user space that it shouldn't have */
2751 		if (PAGE_SIZE > 4096)
2752 			return -EOPNOTSUPP;
2753 
2754 		pfn = (dev->mdev->bar_addr +
2755 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2756 			PAGE_SHIFT;
2757 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2758 					 PAGE_SIZE,
2759 					 pgprot_noncached(vma->vm_page_prot),
2760 					 NULL);
2761 	case MLX5_IB_MMAP_CLOCK_INFO:
2762 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2763 
2764 	default:
2765 		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2766 	}
2767 
2768 	return 0;
2769 }
2770 
2771 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2772 {
2773 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2774 	struct ib_device *ibdev = ibpd->device;
2775 	struct mlx5_ib_alloc_pd_resp resp;
2776 	int err;
2777 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2778 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2779 	u16 uid = 0;
2780 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2781 		udata, struct mlx5_ib_ucontext, ibucontext);
2782 
2783 	uid = context ? context->devx_uid : 0;
2784 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2785 	MLX5_SET(alloc_pd_in, in, uid, uid);
2786 	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2787 	if (err)
2788 		return err;
2789 
2790 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2791 	pd->uid = uid;
2792 	if (udata) {
2793 		resp.pdn = pd->pdn;
2794 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2795 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2796 			return -EFAULT;
2797 		}
2798 	}
2799 
2800 	return 0;
2801 }
2802 
2803 static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2804 {
2805 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2806 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2807 
2808 	return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2809 }
2810 
2811 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2812 {
2813 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2814 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2815 	int err;
2816 	u16 uid;
2817 
2818 	uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
2820 
2821 	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
2822 		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
2823 		return -EOPNOTSUPP;
2824 	}
2825 
2826 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2827 	if (err)
2828 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2829 			     ibqp->qp_num, gid->raw);
2830 
2831 	return err;
2832 }
2833 
2834 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2835 {
2836 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2837 	int err;
2838 	u16 uid;
2839 
2840 	uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
2842 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2843 	if (err)
2844 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2845 			     ibqp->qp_num, gid->raw);
2846 
2847 	return err;
2848 }
2849 
2850 static int init_node_data(struct mlx5_ib_dev *dev)
2851 {
2852 	int err;
2853 
2854 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2855 	if (err)
2856 		return err;
2857 
2858 	dev->mdev->rev_id = dev->mdev->pdev->revision;
2859 
2860 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2861 }
2862 
2863 static ssize_t fw_pages_show(struct device *device,
2864 			     struct device_attribute *attr, char *buf)
2865 {
2866 	struct mlx5_ib_dev *dev =
2867 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2868 
2869 	return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2870 }
2871 static DEVICE_ATTR_RO(fw_pages);
2872 
2873 static ssize_t reg_pages_show(struct device *device,
2874 			      struct device_attribute *attr, char *buf)
2875 {
2876 	struct mlx5_ib_dev *dev =
2877 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2878 
2879 	return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2880 }
2881 static DEVICE_ATTR_RO(reg_pages);
2882 
2883 static ssize_t hca_type_show(struct device *device,
2884 			     struct device_attribute *attr, char *buf)
2885 {
2886 	struct mlx5_ib_dev *dev =
2887 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2888 
2889 	return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2890 }
2891 static DEVICE_ATTR_RO(hca_type);
2892 
2893 static ssize_t hw_rev_show(struct device *device,
2894 			   struct device_attribute *attr, char *buf)
2895 {
2896 	struct mlx5_ib_dev *dev =
2897 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2898 
2899 	return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2900 }
2901 static DEVICE_ATTR_RO(hw_rev);
2902 
2903 static ssize_t board_id_show(struct device *device,
2904 			     struct device_attribute *attr, char *buf)
2905 {
2906 	struct mlx5_ib_dev *dev =
2907 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2908 
2909 	return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2910 			  dev->mdev->board_id);
2911 }
2912 static DEVICE_ATTR_RO(board_id);
2913 
2914 static struct attribute *mlx5_class_attributes[] = {
2915 	&dev_attr_hw_rev.attr,
2916 	&dev_attr_hca_type.attr,
2917 	&dev_attr_board_id.attr,
2918 	&dev_attr_fw_pages.attr,
2919 	&dev_attr_reg_pages.attr,
2920 	NULL,
2921 };
2922 
2923 static const struct attribute_group mlx5_attr_group = {
2924 	.attrs = mlx5_class_attributes,
2925 };
2926 
2927 static void pkey_change_handler(struct work_struct *work)
2928 {
2929 	struct mlx5_ib_port_resources *ports =
2930 		container_of(work, struct mlx5_ib_port_resources,
2931 			     pkey_change_work);
2932 
2933 	if (!ports->gsi)
2934 		/*
2935 		 * We got this event before the device was fully configured
2936 		 * and the MAD registration code wasn't called/finished yet.
2937 		 */
2938 		return;
2939 
2940 	mlx5_ib_gsi_pkey_change(ports->gsi);
2941 }
2942 
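/*
 * On a fatal device error, walk every QP with outstanding work and invoke
 * the completion handler of its CQs, so pending work is flushed back to the
 * consumers.
 */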
2943 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2944 {
2945 	struct mlx5_ib_qp *mqp;
2946 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2947 	struct mlx5_core_cq *mcq;
2948 	struct list_head cq_armed_list;
2949 	unsigned long flags_qp;
2950 	unsigned long flags_cq;
2951 	unsigned long flags;
2952 
2953 	INIT_LIST_HEAD(&cq_armed_list);
2954 
2955 	/* Go over the qp list residing on this ibdev; sync with create/destroy qp. */
2956 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2957 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2958 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2959 		if (mqp->sq.tail != mqp->sq.head) {
2960 			send_mcq = to_mcq(mqp->ibqp.send_cq);
2961 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2962 			if (send_mcq->mcq.comp &&
2963 			    mqp->ibqp.send_cq->comp_handler) {
2964 				if (!send_mcq->mcq.reset_notify_added) {
2965 					send_mcq->mcq.reset_notify_added = 1;
2966 					list_add_tail(&send_mcq->mcq.reset_notify,
2967 						      &cq_armed_list);
2968 				}
2969 			}
2970 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2971 		}
2972 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2973 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2974 		/* no handling is needed for SRQ */
2975 		if (!mqp->ibqp.srq) {
2976 			if (mqp->rq.tail != mqp->rq.head) {
2977 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2978 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2979 				if (recv_mcq->mcq.comp &&
2980 				    mqp->ibqp.recv_cq->comp_handler) {
2981 					if (!recv_mcq->mcq.reset_notify_added) {
2982 						recv_mcq->mcq.reset_notify_added = 1;
2983 						list_add_tail(&recv_mcq->mcq.reset_notify,
2984 							      &cq_armed_list);
2985 					}
2986 				}
2987 				spin_unlock_irqrestore(&recv_mcq->lock,
2988 						       flags_cq);
2989 			}
2990 		}
2991 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2992 	}
2993 	/* At this point, all in-flight post-send work has been flushed since we
2994 	 * locked/unlocked the locks above. Now arm all involved CQs.
2995 	 */
2996 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2997 		mcq->comp(mcq, NULL);
2998 	}
2999 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3000 }
3001 
3002 static void delay_drop_handler(struct work_struct *work)
3003 {
3004 	int err;
3005 	struct mlx5_ib_delay_drop *delay_drop =
3006 		container_of(work, struct mlx5_ib_delay_drop,
3007 			     delay_drop_work);
3008 
3009 	atomic_inc(&delay_drop->events_cnt);
3010 
3011 	mutex_lock(&delay_drop->lock);
3012 	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
3013 	if (err) {
3014 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
3015 			     delay_drop->timeout);
3016 		delay_drop->activate = false;
3017 	}
3018 	mutex_unlock(&delay_drop->lock);
3019 }
3020 
3021 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
3022 				 struct ib_event *ibev)
3023 {
3024 	u32 port = (eqe->data.port.port >> 4) & 0xf;
3025 
3026 	switch (eqe->sub_type) {
3027 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
3028 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3029 					    IB_LINK_LAYER_ETHERNET)
3030 			schedule_work(&ibdev->delay_drop.delay_drop_work);
3031 		break;
3032 	default: /* do nothing */
3033 		return;
3034 	}
3035 }
3036 
3037 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
3038 			      struct ib_event *ibev)
3039 {
3040 	u32 port = (eqe->data.port.port >> 4) & 0xf;
3041 
3042 	ibev->element.port_num = port;
3043 
3044 	switch (eqe->sub_type) {
3045 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
3046 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
3047 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
3048 		if (ibdev->ib_active) {
3049 			struct ib_event speed_event = {};
3050 
3051 			speed_event.device = &ibdev->ib_dev;
3052 			speed_event.event = IB_EVENT_DEVICE_SPEED_CHANGE;
3053 			ib_dispatch_event(&speed_event);
3054 		}
3055 
3056 		/* In RoCE, port up/down events are handled in
3057 		 * mlx5_netdev_event().
3058 		 */
3059 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3060 					    IB_LINK_LAYER_ETHERNET)
3061 			return -EINVAL;
3062 
3063 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
3064 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3065 		break;
3066 
3067 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
3068 		ibev->event = IB_EVENT_LID_CHANGE;
3069 		break;
3070 
3071 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
3072 		ibev->event = IB_EVENT_PKEY_CHANGE;
3073 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3074 		break;
3075 
3076 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
3077 		ibev->event = IB_EVENT_GID_CHANGE;
3078 		break;
3079 
3080 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
3081 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
3082 		break;
3083 	default:
3084 		return -EINVAL;
3085 	}
3086 
3087 	return 0;
3088 }
3089 
3090 static void mlx5_ib_handle_event(struct work_struct *_work)
3091 {
3092 	struct mlx5_ib_event_work *work =
3093 		container_of(_work, struct mlx5_ib_event_work, work);
3094 	struct mlx5_ib_dev *ibdev;
3095 	struct ib_event ibev;
3096 
3097 	if (work->is_slave) {
3098 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
3099 		if (!ibdev)
3100 			goto out;
3101 	} else {
3102 		ibdev = work->dev;
3103 	}
3104 
3105 	switch (work->event) {
3106 	case MLX5_EVENT_TYPE_PORT_CHANGE:
3107 		if (handle_port_change(ibdev, work->param, &ibev))
3108 			goto out;
3109 		break;
3110 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
3111 		handle_general_event(ibdev, work->param, &ibev);
3112 		fallthrough;
3113 	default:
3114 		goto out;
3115 	}
3116 
3117 	ibev.device = &ibdev->ib_dev;
3118 
3119 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
3120 		mlx5_ib_warn(ibdev, "event on invalid port %d\n", ibev.element.port_num);
3121 		goto out;
3122 	}
3123 
3124 	if (ibdev->ib_active)
3125 		ib_dispatch_event(&ibev);
3126 
3127 out:
3128 	kfree(work);
3129 }
3130 
3131 static int mlx5_ib_event(struct notifier_block *nb,
3132 			 unsigned long event, void *param)
3133 {
3134 	struct mlx5_ib_event_work *work;
3135 
3136 	work = kmalloc_obj(*work, GFP_ATOMIC);
3137 	if (!work)
3138 		return NOTIFY_DONE;
3139 
3140 	INIT_WORK(&work->work, mlx5_ib_handle_event);
3141 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
3142 	work->is_slave = false;
3143 	work->param = param;
3144 	work->event = event;
3145 
3146 	queue_work(mlx5_ib_event_wq, &work->work);
3147 
3148 	return NOTIFY_OK;
3149 }
3150 
3151 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
3152 				    unsigned long event, void *param)
3153 {
3154 	struct mlx5_ib_event_work *work;
3155 
3156 	work = kmalloc_obj(*work, GFP_ATOMIC);
3157 	if (!work)
3158 		return NOTIFY_DONE;
3159 
3160 	INIT_WORK(&work->work, mlx5_ib_handle_event);
3161 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
3162 	work->is_slave = true;
3163 	work->param = param;
3164 	work->event = event;
3165 	queue_work(mlx5_ib_event_wq, &work->work);
3166 
3167 	return NOTIFY_OK;
3168 }
3169 
3170 static void mlx5_ib_handle_sys_error_event(struct work_struct *_work)
3171 {
3172 	struct mlx5_ib_event_work *work =
3173 		container_of(_work, struct mlx5_ib_event_work, work);
3174 	struct mlx5_ib_dev *ibdev = work->dev;
3175 	struct ib_event ibev;
3176 
3177 	ibev.event = IB_EVENT_DEVICE_FATAL;
3178 	mlx5_ib_handle_internal_error(ibdev);
3179 	ibev.element.port_num = (u8)(unsigned long)work->param;
3180 	ibev.device = &ibdev->ib_dev;
3181 
3182 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
3183 		mlx5_ib_warn(ibdev, "event on invalid port %d\n", ibev.element.port_num);
3184 		goto out;
3185 	}
3186 
3187 	if (ibdev->ib_active)
3188 		ib_dispatch_event(&ibev);
3189 
3190 	ibdev->ib_active = false;
3191 out:
3192 	kfree(work);
3193 }
3194 
3195 static int mlx5_ib_sys_error_event(struct notifier_block *nb,
3196 				   unsigned long event, void *param)
3197 {
3198 	struct mlx5_ib_event_work *work;
3199 
3200 	if (event != MLX5_DEV_EVENT_SYS_ERROR)
3201 		return NOTIFY_DONE;
3202 
3203 	work = kmalloc_obj(*work, GFP_ATOMIC);
3204 	if (!work)
3205 		return NOTIFY_DONE;
3206 
3207 	INIT_WORK(&work->work, mlx5_ib_handle_sys_error_event);
3208 	work->dev = container_of(nb, struct mlx5_ib_dev, sys_error_events);
3209 	work->is_slave = false;
3210 	work->param = param;
3211 	work->event = event;
3212 
3213 	queue_work(mlx5_ib_event_wq, &work->work);
3214 
3215 	return NOTIFY_OK;
3216 }
3217 
3218 static int mlx5_ib_stage_sys_error_notifier_init(struct mlx5_ib_dev *dev)
3219 {
3220 	dev->sys_error_events.notifier_call = mlx5_ib_sys_error_event;
3221 	mlx5_notifier_register(dev->mdev, &dev->sys_error_events);
3222 	return 0;
3223 }
3224 
3225 static void mlx5_ib_stage_sys_error_notifier_cleanup(struct mlx5_ib_dev *dev)
3226 {
3227 	mlx5_notifier_unregister(dev->mdev, &dev->sys_error_events);
3228 }
3229 
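/* Read the number of planes of a multiplane IB device, or 0 if unsupported. */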
3230 static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
3231 {
3232 	struct mlx5_hca_vport_context vport_ctx;
3233 	int err;
3234 
3235 	*num_plane = 0;
3236 	if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
3237 		return 0;
3238 
3239 	err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
3240 	if (err)
3241 		return err;
3242 
3243 	*num_plane = vport_ctx.num_plane;
3244 	return 0;
3245 }
3246 
3247 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
3248 {
3249 	struct mlx5_hca_vport_context vport_ctx;
3250 	int err;
3251 	int port;
3252 
3253 	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
3254 		return 0;
3255 
3256 	for (port = 1; port <= dev->num_ports; port++) {
3257 		if (dev->num_plane) {
3258 			dev->port_caps[port - 1].has_smi = false;
3259 			continue;
3260 		} else if (!MLX5_CAP_GEN(dev->mdev, ib_virt) ||
3261 			dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
3262 			dev->port_caps[port - 1].has_smi = true;
3263 			continue;
3264 		}
3265 
3266 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
3267 						   &vport_ctx);
3268 		if (err) {
3269 			mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
3270 				    port, err);
3271 			return err;
3272 		}
3273 		dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
3274 	}
3275 
3276 	return 0;
3277 }
3278 
3279 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
3280 {
3281 	unsigned int port;
3282 
3283 	rdma_for_each_port (&dev->ib_dev, port)
3284 		mlx5_query_ext_port_caps(dev, port);
3285 }
3286 
3287 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
3288 {
3289 	switch (umr_fence_cap) {
3290 	case MLX5_CAP_UMR_FENCE_NONE:
3291 		return MLX5_FENCE_MODE_NONE;
3292 	case MLX5_CAP_UMR_FENCE_SMALL:
3293 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
3294 	default:
3295 		return MLX5_FENCE_MODE_STRONG_ORDERING;
3296 	}
3297 }
3298 
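/*
 * Lazily create the shared device resources p0 (PD) and c0 (CQ) using
 * double-checked locking; both live until device unload.
 */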
3299 int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
3300 {
3301 	struct mlx5_ib_resources *devr = &dev->devr;
3302 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
3303 	struct ib_device *ibdev;
3304 	struct ib_pd *pd;
3305 	struct ib_cq *cq;
3306 	int ret = 0;
3307 
3309 	/*
3310 	 * devr->c0 is set once, never changed until device unload.
3311 	 * Avoid taking the mutex if initialization is already done.
3312 	 */
3313 	if (devr->c0)
3314 		return 0;
3315 
3316 	mutex_lock(&devr->cq_lock);
3317 	if (devr->c0)
3318 		goto unlock;
3319 
3320 	ibdev = &dev->ib_dev;
3321 	pd = ib_alloc_pd(ibdev, 0);
3322 	if (IS_ERR(pd)) {
3323 		ret = PTR_ERR(pd);
3324 		mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
3325 			    pd);
3326 		goto unlock;
3327 	}
3328 
3329 	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
3330 	if (IS_ERR(cq)) {
3331 		ret = PTR_ERR(cq);
3332 		mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
3333 			    cq);
3334 		ib_dealloc_pd(pd);
3335 		goto unlock;
3336 	}
3337 
3338 	devr->p0 = pd;
3339 	devr->c0 = cq;
3340 
3341 unlock:
3342 	mutex_unlock(&devr->cq_lock);
3343 	return ret;
3344 }
3345 
3346 int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
3347 {
3348 	struct mlx5_ib_resources *devr = &dev->devr;
3349 	struct ib_srq_init_attr attr;
3350 	struct ib_srq *s0, *s1;
3351 	int ret = 0;
3352 
3353 	/*
3354 	 * devr->s1 is set once, never changed until device unload.
3355 	 * Avoid taking the mutex if initialization is already done.
3356 	 */
3357 	if (devr->s1)
3358 		return 0;
3359 
3360 	mutex_lock(&devr->srq_lock);
3361 	if (devr->s1)
3362 		goto unlock;
3363 
3364 	ret = mlx5_ib_dev_res_cq_init(dev);
3365 	if (ret)
3366 		goto unlock;
3367 
3368 	memset(&attr, 0, sizeof(attr));
3369 	attr.attr.max_sge = 1;
3370 	attr.attr.max_wr = 1;
3371 	attr.srq_type = IB_SRQT_XRC;
3372 	attr.ext.cq = devr->c0;
3373 
3374 	s0 = ib_create_srq(devr->p0, &attr);
3375 	if (IS_ERR(s0)) {
3376 		ret = PTR_ERR(s0);
3377 		mlx5_ib_err(dev,
3378 			    "Couldn't create SRQ 0 for res init, err=%pe\n",
3379 			    s0);
3380 		goto unlock;
3381 	}
3382 
3383 	memset(&attr, 0, sizeof(attr));
3384 	attr.attr.max_sge = 1;
3385 	attr.attr.max_wr = 1;
3386 	attr.srq_type = IB_SRQT_BASIC;
3387 
3388 	s1 = ib_create_srq(devr->p0, &attr);
3389 	if (IS_ERR(s1)) {
3390 		ret = PTR_ERR(s1);
3391 		mlx5_ib_err(dev,
3392 			    "Couldn't create SRQ 1 for res init, err=%pe\n",
3393 			    s1);
3394 		ib_destroy_srq(s0);
		goto unlock;
3395 	}
3396 
3397 	devr->s0 = s0;
3398 	devr->s1 = s1;
3399 
3400 unlock:
3401 	mutex_unlock(&devr->srq_lock);
3402 	return ret;
3403 }
3404 
3405 static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
3406 {
3407 	struct mlx5_ib_resources *devr = &dev->devr;
3408 	int ret;
3409 
3410 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
3411 		return -EOPNOTSUPP;
3412 
3413 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
3414 	if (ret)
3415 		return ret;
3416 
3417 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
3418 	if (ret) {
3419 		mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
3420 		return ret;
3421 	}
3422 
3423 	mutex_init(&devr->cq_lock);
3424 	mutex_init(&devr->srq_lock);
3425 
3426 	return 0;
3427 }
3428 
3429 static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
3430 {
3431 	struct mlx5_ib_resources *devr = &dev->devr;
3432 
3433 	/* After s0/s1 init, they are not unset during the device lifetime. */
3434 	if (devr->s1) {
3435 		ib_destroy_srq(devr->s1);
3436 		ib_destroy_srq(devr->s0);
3437 	}
3438 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
3439 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
3440 	/* After p0/c0 init, they are not unset during the device lifetime. */
3441 	if (devr->c0) {
3442 		ib_destroy_cq(devr->c0);
3443 		ib_dealloc_pd(devr->p0);
3444 	}
3445 	mutex_destroy(&devr->cq_lock);
3446 	mutex_destroy(&devr->srq_lock);
3447 }
3448 
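/*
 * Create the data-direct PD and a whole-memory PA mkey; if the device
 * supports relaxed ordering, a second, best-effort RO mkey is created too.
 */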
3449 static int
3450 mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
3451 {
3452 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3453 	struct mlx5_core_dev *mdev = dev->mdev;
3454 	bool ro_supp = false;
3455 	void *mkc;
3456 	u32 mkey;
3457 	u32 pdn;
3458 	u32 *in;
3459 	int err;
3460 
3461 	err = mlx5_core_alloc_pd(mdev, &pdn);
3462 	if (err)
3463 		return err;
3464 
3465 	in = kvzalloc(inlen, GFP_KERNEL);
3466 	if (!in) {
3467 		err = -ENOMEM;
3468 		goto err;
3469 	}
3470 
3471 	MLX5_SET(create_mkey_in, in, data_direct, 1);
3472 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3473 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
3474 	MLX5_SET(mkc, mkc, lw, 1);
3475 	MLX5_SET(mkc, mkc, lr, 1);
3476 	MLX5_SET(mkc, mkc, rw, 1);
3477 	MLX5_SET(mkc, mkc, rr, 1);
3478 	MLX5_SET(mkc, mkc, a, 1);
3479 	MLX5_SET(mkc, mkc, pd, pdn);
3480 	MLX5_SET(mkc, mkc, length64, 1);
3481 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3482 	err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
3483 	if (err)
3484 		goto err_mkey;
3485 
3486 	dev->ddr.mkey = mkey;
3487 	dev->ddr.pdn = pdn;
3488 
3489 	/* Create another mkey with relaxed ordering (RO) support */
3490 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
3491 		MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
3492 		ro_supp = true;
3493 	}
3494 
3495 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
3496 		MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
3497 		ro_supp = true;
3498 	}
3499 
3500 	if (ro_supp) {
3501 		err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
3502 		/* RO is defined as best effort */
3503 		if (!err) {
3504 			dev->ddr.mkey_ro = mkey;
3505 			dev->ddr.mkey_ro_valid = true;
3506 		}
3507 	}
3508 
3509 	kvfree(in);
3510 	return 0;
3511 
3512 err_mkey:
3513 	kvfree(in);
3514 err:
3515 	mlx5_core_dealloc_pd(mdev, pdn);
3516 	return err;
3517 }
3518 
3519 static void
3520 mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
3521 {
3523 	if (dev->ddr.mkey_ro_valid)
3524 		mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
3525 
3526 	mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
3527 	mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
3528 }
3529 
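/*
 * Compute the rdma core port capability flags from the link layer, the
 * multiplane/SMI device type and the RoCE version and L3 capabilities.
 */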
3530 static u32 get_core_cap_flags(struct ib_device *ibdev,
3531 			      struct mlx5_hca_vport_context *rep)
3532 {
3533 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3534 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
3535 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
3536 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
3537 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
3538 	u32 ret = 0;
3539 
3540 	if (rep->grh_required)
3541 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
3542 
3543 	if (dev->num_plane)
3544 		return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD |
3545 			RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA |
3546 			RDMA_CORE_CAP_AF_IB;
3547 	else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
3548 		return ret | RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI;
3549 
3550 	if (ll == IB_LINK_LAYER_INFINIBAND)
3551 		return ret | RDMA_CORE_PORT_IBA_IB;
3552 
3553 	if (raw_support)
3554 		ret |= RDMA_CORE_PORT_RAW_PACKET;
3555 
3556 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
3557 		return ret;
3558 
3559 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
3560 		return ret;
3561 
3562 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
3563 		ret |= RDMA_CORE_PORT_IBA_ROCE;
3564 
3565 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
3566 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3567 
3568 	return ret;
3569 }
3570 
3571 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
3572 			       struct ib_port_immutable *immutable)
3573 {
3574 	struct ib_port_attr attr;
3575 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3576 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
3577 	struct mlx5_hca_vport_context rep = {0};
3578 	int err;
3579 
3580 	err = ib_query_port(ibdev, port_num, &attr);
3581 	if (err)
3582 		return err;
3583 
3584 	if (ll == IB_LINK_LAYER_INFINIBAND) {
3585 		if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
3586 			port_num = smi_to_native_portnum(dev, port_num);
3587 
3588 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
3589 						   &rep);
3590 		if (err)
3591 			return err;
3592 	}
3593 
3594 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3595 	immutable->gid_tbl_len = attr.gid_tbl_len;
3596 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
3597 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3598 
3599 	return 0;
3600 }
3601 
3602 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
3603 				   struct ib_port_immutable *immutable)
3604 {
3605 	struct ib_port_attr attr;
3606 	int err;
3607 
3608 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
3609 
3610 	err = ib_query_port(ibdev, port_num, &attr);
3611 	if (err)
3612 		return err;
3613 
3614 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3615 	immutable->gid_tbl_len = attr.gid_tbl_len;
3616 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
3617 
3618 	return 0;
3619 }
3620 
3621 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3622 {
3623 	struct mlx5_ib_dev *dev =
3624 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3625 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3626 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3627 		 fw_rev_sub(dev->mdev));
3628 }
3629 
3630 static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
3631 {
3632 	struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
3633 					       lag_events);
3634 	struct mlx5_core_dev *mdev = dev->mdev;
3635 	struct ib_device *ibdev = &dev->ib_dev;
3636 	struct net_device *old_ndev = NULL;
3637 	struct mlx5_ib_port *port;
3638 	struct net_device *ndev;
3639 	u32 portnum = 0;
3640 	int ret = 0;
3641 	int i;
3642 
3643 	switch (event) {
3644 	case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
3645 		ndev = data;
3646 		if (ndev) {
3647 			if (!mlx5_lag_is_roce(mdev)) {
3648 				/* SR-IOV LAG */
3649 				for (i = 0; i < dev->num_ports; i++) {
3650 					port = &dev->port[i];
3651 					if (port->rep && port->rep->vport ==
3652 					    MLX5_VPORT_UPLINK) {
3653 						portnum = i;
3654 						break;
3655 					}
3656 				}
3657 			}
3658 			old_ndev = ib_device_get_netdev(ibdev, portnum + 1);
3659 			ret = ib_device_set_netdev(ibdev, ndev, portnum + 1);
3660 			if (ret)
3661 				goto out;
3662 
3663 			if (old_ndev)
3664 				roce_del_all_netdev_gids(ibdev, portnum + 1,
3665 							 old_ndev);
3666 			rdma_roce_rescan_port(ibdev, portnum + 1);
3667 		}
3668 		break;
3669 	default:
3670 		return NOTIFY_DONE;
3671 	}
3672 
3673 out:
3674 	dev_put(old_ndev);
3675 	return notifier_from_errno(ret);
3676 }
3677 
3678 static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
3679 {
3680 	dev->lag_events.notifier_call = lag_event;
3681 	blocking_notifier_chain_register(&dev->mdev->priv.lag_nh,
3682 					 &dev->lag_events);
3683 }
3684 
3685 static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev)
3686 {
3687 	blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh,
3688 					   &dev->lag_events);
3689 }
3690 
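/*
 * When hardware LAG is active, create the vport LAG object and the demux
 * flow table (one FTE per port) used to steer traffic back to the right
 * port, then subscribe to LAG lower-state events.
 */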
3691 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
3692 {
3693 	struct mlx5_flow_table_attr ft_attr = {};
3694 	struct mlx5_core_dev *mdev = dev->mdev;
3695 	struct mlx5_flow_namespace *ns;
3696 	int err;
3697 
3698 	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_LAG);
3699 	if (!ns || !mlx5_lag_is_active(mdev))
3700 		return 0;
3701 
3702 	err = mlx5_cmd_create_vport_lag(mdev);
3703 	if (err)
3704 		return err;
3705 
3706 	ft_attr.level = 0;
3707 	ft_attr.prio = 0;
3708 	ft_attr.max_fte = dev->num_ports;
3709 
3710 	err = mlx5_lag_demux_init(mdev, &ft_attr);
3711 	if (err)
3712 		goto err_destroy_vport_lag;
3713 
3714 	mlx5e_lag_event_register(dev);
3715 	dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3716 	dev->lag_active = true;
3717 	return 0;
3718 
3719 err_destroy_vport_lag:
3720 	mlx5_cmd_destroy_vport_lag(mdev);
3721 	return err;
3722 }
3723 
3724 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3725 {
3726 	struct mlx5_core_dev *mdev = dev->mdev;
3727 
3728 	if (dev->lag_active) {
3729 		dev->lag_active = false;
3730 
3731 		mlx5e_lag_event_unregister(dev);
3732 		mlx5_lag_demux_cleanup(mdev);
3733 
3734 		mlx5_cmd_destroy_vport_lag(mdev);
3735 	}
3736 }
3737 
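/*
 * Register a per-netdev notifier for the tracked uplink netdev.
 * Registration is idempotent, keyed on roce->tracking_netdev.
 */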
3738 static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
3739 					  struct net_device *netdev)
3740 {
3741 	int err;
3742 
3743 	if (roce->tracking_netdev)
3744 		return;
3745 	roce->tracking_netdev = netdev;
3746 	roce->nb.notifier_call = mlx5_netdev_event;
3747 	err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
3748 	WARN_ON(err);
3749 }
3750 
3751 static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
3752 {
3753 	if (!roce->tracking_netdev)
3754 		return;
3755 	unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
3756 					      &roce->nn);
3757 	roce->tracking_netdev = NULL;
3758 }
3759 
3760 static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
3761 				     unsigned long event, void *data)
3762 {
3763 	struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
3764 	struct net_device *netdev = data;
3765 
3766 	switch (event) {
3767 	case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
3768 		if (netdev)
3769 			mlx5_netdev_notifier_register(roce, netdev);
3770 		else
3771 			mlx5_netdev_notifier_unregister(roce);
3772 		break;
3773 	default:
3774 		return NOTIFY_DONE;
3775 	}
3776 
3777 	return NOTIFY_OK;
3778 }
3779 
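/*
 * Subscribe to MLX5_DRIVER_EVENT_UPLINK_NETDEV notifications so the
 * per-port netdev notifier always follows the current uplink netdev;
 * the replay call delivers the current uplink immediately.
 */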
3780 static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
3781 {
3782 	struct mlx5_roce *roce = &dev->port[port_num].roce;
3783 
3784 	roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
3785 	mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3786 	mlx5_core_uplink_netdev_event_replay(dev->mdev);
3787 }
3788 
3789 static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
3790 {
3791 	struct mlx5_roce *roce = &dev->port[port_num].roce;
3792 
3793 	mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3794 	mlx5_netdev_notifier_unregister(roce);
3795 }
3796 
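/*
 * Enable RoCE on the NIC vport (skipped for representors and the raw
 * Ethernet profile) and bring up Ethernet LAG support.
 */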
3797 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3798 {
3799 	int err;
3800 
3801 	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3802 		err = mlx5_nic_vport_enable_roce(dev->mdev);
3803 		if (err)
3804 			return err;
3805 	}
3806 
3807 	err = mlx5_eth_lag_init(dev);
3808 	if (err)
3809 		goto err_disable_roce;
3810 
3811 	return 0;
3812 
3813 err_disable_roce:
3814 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3815 		mlx5_nic_vport_disable_roce(dev->mdev);
3816 
3817 	return err;
3818 }
3819 
3820 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3821 {
3822 	mlx5_eth_lag_cleanup(dev);
3823 	if (!dev->is_rep && dev->profile != &raw_eth_profile)
3824 		mlx5_nic_vport_disable_roce(dev->mdev);
3825 }
3826 
3827 static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3828 				 enum rdma_netdev_t type,
3829 				 struct rdma_netdev_alloc_params *params)
3830 {
3831 	if (type != RDMA_NETDEV_IPOIB)
3832 		return -EOPNOTSUPP;
3833 
3834 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3835 }
3836 
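/*
 * debugfs accessors for the delay-drop timeout: reads report the current
 * value in usec; writes are rounded up to 100 usec granularity and
 * clamped to MLX5_MAX_DELAY_DROP_TIMEOUT_MS.
 */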
3837 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3838 				       size_t count, loff_t *pos)
3839 {
3840 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3841 	char lbuf[20];
3842 	int len;
3843 
3844 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3845 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
3846 }
3847 
3848 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3849 					size_t count, loff_t *pos)
3850 {
3851 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3852 	u32 timeout;
3853 	u32 var;
3854 
3855 	if (kstrtouint_from_user(buf, count, 0, &var))
3856 		return -EFAULT;
3857 
3858 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3859 			1000);
3860 	if (timeout != var)
3861 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3862 			    timeout);
3863 
3864 	delay_drop->timeout = timeout;
3865 
3866 	return count;
3867 }
3868 
3869 static const struct file_operations fops_delay_drop_timeout = {
3870 	.owner	= THIS_MODULE,
3871 	.open	= simple_open,
3872 	.write	= delay_drop_timeout_write,
3873 	.read	= delay_drop_timeout_read,
3874 };
3875 
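/*
 * Detach a slave port from its IB device.  mpi->ibdev is cleared under
 * mpi_lock, event and netdev tracking are stopped, and any outstanding
 * mdev references are waited out through unref_comp before the vport is
 * unaffiliated.  The caller must hold mlx5_ib_multiport_mutex.
 */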
3876 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3877 				      struct mlx5_ib_multiport_info *mpi)
3878 {
3879 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3880 	struct mlx5_ib_port *port = &ibdev->port[port_num];
3881 	int comps;
3882 	int err;
3883 	int i;
3884 
3885 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3886 
3887 	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
3888 
3889 	mlx5_core_mp_event_replay(ibdev->mdev,
3890 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3891 				  NULL);
3892 	mlx5_core_mp_event_replay(mpi->mdev,
3893 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3894 				  NULL);
3895 
3896 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3897 
3898 	spin_lock(&port->mp.mpi_lock);
3899 	if (!mpi->ibdev) {
3900 		spin_unlock(&port->mp.mpi_lock);
3901 		return;
3902 	}
3903 
3904 	mpi->ibdev = NULL;
3905 
3906 	spin_unlock(&port->mp.mpi_lock);
3907 	if (mpi->mdev_events.notifier_call)
3908 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3909 	mpi->mdev_events.notifier_call = NULL;
3910 	mlx5_mdev_netdev_untrack(ibdev, port_num);
3911 	spin_lock(&port->mp.mpi_lock);
3912 
3913 	comps = mpi->mdev_refcnt;
3914 	if (comps) {
3915 		mpi->unaffiliate = true;
3916 		init_completion(&mpi->unref_comp);
3917 		spin_unlock(&port->mp.mpi_lock);
3918 
3919 		for (i = 0; i < comps; i++)
3920 			wait_for_completion(&mpi->unref_comp);
3921 
3922 		spin_lock(&port->mp.mpi_lock);
3923 		mpi->unaffiliate = false;
3924 	}
3925 
3926 	port->mp.mpi = NULL;
3927 
3928 	spin_unlock(&port->mp.mpi_lock);
3929 
3930 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3931 
3932 	mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3933 	/* Only log an error here; the pointers still need to be cleaned
3934 	 * up and the port added back to the unaffiliated list.
3935 	 */
3936 	if (err)
3937 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3938 			    port_num + 1);
3939 
3940 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3941 }
3942 
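/*
 * Bind a slave port to this IB device: publish the mpi pointer under
 * mpi_lock, affiliate the vport, start event/netdev tracking and replay
 * the affiliation event to both core devices.  Returns false if the port
 * was already affiliated or any step fails (the unbind path undoes the
 * partial setup).
 */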
3943 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3944 				    struct mlx5_ib_multiport_info *mpi)
3945 {
3946 	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3947 	u64 key;
3948 	int err;
3949 
3950 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
3951 
3952 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3953 	if (ibdev->port[port_num].mp.mpi) {
3954 		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3955 			    port_num + 1);
3956 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3957 		return false;
3958 	}
3959 
3960 	ibdev->port[port_num].mp.mpi = mpi;
3961 	mpi->ibdev = ibdev;
3962 	mpi->mdev_events.notifier_call = NULL;
3963 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3964 
3965 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3966 	if (err)
3967 		goto unbind;
3968 
3969 	mlx5_mdev_netdev_track(ibdev, port_num);
3970 
3971 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
3972 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3973 
3974 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
3975 
3976 	key = mpi->mdev->priv.adev_idx;
3977 	mlx5_core_mp_event_replay(mpi->mdev,
3978 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3979 				  &key);
3980 	mlx5_core_mp_event_replay(ibdev->mdev,
3981 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3982 				  &key);
3983 
3984 	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
3985 	if (err)
3986 		goto unbind;
3987 
3988 	return true;
3989 
3990 unbind:
3991 	mlx5_ib_unbind_slave_port(ibdev, mpi);
3992 	return false;
3993 }
3994 
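/*
 * Data-direct support needs both the data_direct and query_vuid caps.
 * The device's VUID is queried and used to register this IB device with
 * the data-direct framework.
 */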
3995 static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev)
3996 {
3997 	char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
3998 	int ret;
3999 
4000 	if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
4001 	    !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
4002 		return 0;
4003 
4004 	ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid);
4005 	if (ret)
4006 		return ret;
4007 
4008 	ret = mlx5_ib_create_data_direct_resources(dev);
4009 	if (ret)
4010 		return ret;
4011 
4012 	INIT_LIST_HEAD(&dev->data_direct_mr_list);
4013 	ret = mlx5_data_direct_ib_reg(dev, vuid);
4014 	if (ret)
4015 		mlx5_ib_free_data_direct_resources(dev);
4016 
4017 	return ret;
4018 }
4019 
4020 static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev)
4021 {
4022 	if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
4023 	    !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
4024 		return;
4025 
4026 	mlx5_data_direct_ib_unreg(dev);
4027 	mlx5_ib_free_data_direct_resources(dev);
4028 }
4029 
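/*
 * On a multiport master with an Ethernet link layer, claim all ports:
 * the native port gets a stub mpi, and every other port is bound to a
 * matching entry (same sys_image_guid, coredev type and port number)
 * from the unaffiliated list, when one exists.
 */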
4030 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
4031 {
4032 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4033 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
4034 							  port_num + 1);
4035 	struct mlx5_ib_multiport_info *mpi;
4036 	int err;
4037 	u32 i;
4038 
4039 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
4040 		return 0;
4041 
4042 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
4043 						     &dev->sys_image_guid);
4044 	if (err)
4045 		return err;
4046 
4047 	err = mlx5_nic_vport_enable_roce(dev->mdev);
4048 	if (err)
4049 		return err;
4050 
4051 	mutex_lock(&mlx5_ib_multiport_mutex);
4052 	for (i = 0; i < dev->num_ports; i++) {
4053 		bool bound = false;
4054 
4055 		/* build a stub multiport info struct for the native port. */
4056 		if (i == port_num) {
4057 			mpi = kzalloc_obj(*mpi);
4058 			if (!mpi) {
4059 				mutex_unlock(&mlx5_ib_multiport_mutex);
4060 				mlx5_nic_vport_disable_roce(dev->mdev);
4061 				return -ENOMEM;
4062 			}
4063 
4064 			mpi->is_master = true;
4065 			mpi->mdev = dev->mdev;
4066 			mpi->sys_image_guid = dev->sys_image_guid;
4067 			dev->port[i].mp.mpi = mpi;
4068 			mpi->ibdev = dev;
4069 			mpi = NULL;
4070 			continue;
4071 		}
4072 
4073 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
4074 				    list) {
4075 			if (dev->sys_image_guid == mpi->sys_image_guid &&
4076 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
4077 			    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
4078 				bound = mlx5_ib_bind_slave_port(dev, mpi);
4079 			}
4080 
4081 			if (bound) {
4082 				dev_dbg(mpi->mdev->device,
4083 					"removing port from unaffiliated list.\n");
4084 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
4085 				list_del(&mpi->list);
4086 				break;
4087 			}
4088 		}
4089 		if (!bound)
4090 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
4091 				    i + 1);
4092 	}
4093 
4094 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
4095 	mutex_unlock(&mlx5_ib_multiport_mutex);
4096 	return err;
4097 }
4098 
4099 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
4100 {
4101 	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4102 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
4103 							  port_num + 1);
4104 	u32 i;
4105 
4106 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
4107 		return;
4108 
4109 	mutex_lock(&mlx5_ib_multiport_mutex);
4110 	for (i = 0; i < dev->num_ports; i++) {
4111 		if (dev->port[i].mp.mpi) {
4112 			/* Destroy the native port stub */
4113 			if (i == port_num) {
4114 				kfree(dev->port[i].mp.mpi);
4115 				dev->port[i].mp.mpi = NULL;
4116 			} else {
4117 				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
4118 					    i + 1);
4119 				list_add_tail(&dev->port[i].mp.mpi->list,
4120 					      &mlx5_ib_unaffiliated_port_list);
4121 				mlx5_ib_unbind_slave_port(dev,
4122 							  dev->port[i].mp.mpi);
4123 			}
4124 		}
4125 	}
4126 
4127 	mlx5_ib_dbg(dev, "removing from devlist\n");
4128 	list_del(&dev->ib_dev_list);
4129 	mutex_unlock(&mlx5_ib_multiport_mutex);
4130 
4131 	mlx5_nic_vport_disable_roce(dev->mdev);
4132 }
4133 
4134 static int mmap_obj_cleanup(struct ib_uobject *uobject,
4135 			    enum rdma_remove_reason why,
4136 			    struct uverbs_attr_bundle *attrs)
4137 {
4138 	struct mlx5_user_mmap_entry *obj = uobject->object;
4139 
4140 	rdma_user_mmap_entry_remove(&obj->rdma_entry);
4141 	return 0;
4142 }
4143 
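/*
 * User mmap entries for VAR/UAR objects are allocated from the
 * page-offset window [START << 16, (END << 16) + 0xffff], which keeps
 * them clear of the fixed legacy mmap commands decoded from low page
 * offsets.
 */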
4144 static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
4145 					    struct mlx5_user_mmap_entry *entry,
4146 					    size_t length)
4147 {
4148 	return rdma_user_mmap_entry_insert_range(
4149 		&c->ibucontext, &entry->rdma_entry, length,
4150 		(MLX5_IB_MMAP_OFFSET_START << 16),
4151 		((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
4152 }
4153 
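/*
 * Allocate one doorbell page from a VAR region bitmap (the TLP region
 * when MLX5_IB_UAPI_VAR_ALLOC_FLAG_TLP is set, the virtio VAR region
 * otherwise) and expose it to userspace via an mmap entry.
 */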
4154 static struct mlx5_user_mmap_entry *
4155 alloc_var_entry(struct mlx5_ib_ucontext *c, u32 flags)
4156 {
4157 	struct mlx5_user_mmap_entry *entry;
4158 	struct mlx5_var_region *var_region;
4159 	struct mlx5_var_table *var_table;
4160 	u32 page_idx;
4161 	int err;
4162 
4163 	var_table = &to_mdev(c->ibucontext.device)->var_table;
4164 	if (flags & MLX5_IB_UAPI_VAR_ALLOC_FLAG_TLP)
4165 		var_region = &var_table->tlp_var_region;
4166 	else
4167 		var_region = &var_table->var_region;
4168 
4169 	entry = kzalloc_obj(*entry);
4170 	if (!entry)
4171 		return ERR_PTR(-ENOMEM);
4172 
4173 	mutex_lock(&var_region->bitmap_lock);
4174 	page_idx = find_first_zero_bit(var_region->bitmap,
4175 				       var_region->num_var_hw_entries);
4176 	if (page_idx >= var_region->num_var_hw_entries) {
4177 		err = -ENOSPC;
4178 		mutex_unlock(&var_region->bitmap_lock);
4179 		goto end;
4180 	}
4181 
4182 	set_bit(page_idx, var_region->bitmap);
4183 	mutex_unlock(&var_region->bitmap_lock);
4184 
4185 	entry->address = var_region->hw_start_addr +
4186 				(page_idx * var_region->stride_size);
4187 	entry->page_idx = page_idx;
4188 	entry->mmap_flag = flags & MLX5_IB_UAPI_VAR_ALLOC_FLAG_TLP ?
4189 				   MLX5_IB_MMAP_TYPE_TLP_VAR :
4190 				   MLX5_IB_MMAP_TYPE_VAR;
4191 
4192 	err = mlx5_rdma_user_mmap_entry_insert(c, entry,
4193 					       var_region->stride_size);
4194 	if (err)
4195 		goto err_insert;
4196 
4197 	return entry;
4198 
4199 err_insert:
4200 	mutex_lock(&var_region->bitmap_lock);
4201 	clear_bit(page_idx, var_region->bitmap);
4202 	mutex_unlock(&var_region->bitmap_lock);
4203 end:
4204 	kfree(entry);
4205 	return ERR_PTR(err);
4206 }
4207 
4208 static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
4209 	struct uverbs_attr_bundle *attrs)
4210 {
4211 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
4212 		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
4213 	struct mlx5_user_mmap_entry *entry;
4214 	struct mlx5_ib_ucontext *c;
4215 	u64 mmap_offset;
4216 	u32 flags = 0;
4217 	u32 length;
4218 	int err;
4219 
4220 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
4221 	if (IS_ERR(c))
4222 		return PTR_ERR(c);
4223 
4224 	err = uverbs_get_flags32(&flags, attrs,
4225 				 MLX5_IB_ATTR_VAR_OBJ_ALLOC_FLAGS,
4226 				 MLX5_IB_UAPI_VAR_ALLOC_FLAG_TLP);
4227 	if (err)
4228 		return err;
4229 
4230 	if (flags & MLX5_IB_UAPI_VAR_ALLOC_FLAG_TLP) {
4231 		if (!MLX5_CAP_GEN(to_mdev(c->ibucontext.device)->mdev,
4232 				  tlp_device_emulation_manager))
4233 			return -EOPNOTSUPP;
4234 	} else {
4235 		if (!(MLX5_CAP_GEN_64(to_mdev(c->ibucontext.device)->mdev,
4236 				      general_obj_types) &
4237 		      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
4238 			return -EOPNOTSUPP;
4239 	}
4240 
4241 	entry = alloc_var_entry(c, flags);
4242 	if (IS_ERR(entry))
4243 		return PTR_ERR(entry);
4244 
4245 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
4246 	length = entry->rdma_entry.npages * PAGE_SIZE;
4247 	uobj->object = entry;
4248 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
4249 
4250 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
4251 			     &mmap_offset, sizeof(mmap_offset));
4252 	if (err)
4253 		return err;
4254 
4255 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
4256 			     &entry->page_idx, sizeof(entry->page_idx));
4257 	if (err)
4258 		return err;
4259 
4260 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
4261 			     &length, sizeof(length));
4262 	return err;
4263 }
4264 
4265 DECLARE_UVERBS_NAMED_METHOD(
4266 	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
4267 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
4268 			MLX5_IB_OBJECT_VAR,
4269 			UVERBS_ACCESS_NEW,
4270 			UA_MANDATORY),
4271 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_VAR_OBJ_ALLOC_FLAGS,
4272 			     enum mlx5_ib_uapi_var_alloc_flags,
4273 			     UA_OPTIONAL),
4274 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
4275 			   UVERBS_ATTR_TYPE(u32),
4276 			   UA_MANDATORY),
4277 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
4278 			   UVERBS_ATTR_TYPE(u32),
4279 			   UA_MANDATORY),
4280 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
4281 			    UVERBS_ATTR_TYPE(u64),
4282 			    UA_MANDATORY));
4283 
4284 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
4285 	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
4286 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
4287 			MLX5_IB_OBJECT_VAR,
4288 			UVERBS_ACCESS_DESTROY,
4289 			UA_MANDATORY));
4290 
4291 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
4292 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
4293 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
4294 			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
4295 
4296 static bool var_is_supported(struct ib_device *device)
4297 {
4298 	struct mlx5_ib_dev *dev = to_mdev(device);
4299 
4300 	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4301 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) ||
4302 		MLX5_CAP_GEN(dev->mdev, tlp_device_emulation_manager);
4303 }
4304 
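/*
 * Allocate a UAR page on behalf of the user context and wrap it in an
 * mmap entry; BF allocations are mapped write-combining, NC allocations
 * non-cached.
 */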
4305 static struct mlx5_user_mmap_entry *
4306 alloc_uar_entry(struct mlx5_ib_ucontext *c,
4307 		enum mlx5_ib_uapi_uar_alloc_type alloc_type)
4308 {
4309 	struct mlx5_user_mmap_entry *entry;
4310 	struct mlx5_ib_dev *dev;
4311 	u32 uar_index;
4312 	int err;
4313 
4314 	entry = kzalloc_obj(*entry);
4315 	if (!entry)
4316 		return ERR_PTR(-ENOMEM);
4317 
4318 	dev = to_mdev(c->ibucontext.device);
4319 	err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
4320 	if (err)
4321 		goto end;
4322 
4323 	entry->page_idx = uar_index;
4324 	entry->address = uar_index2paddress(dev, uar_index);
4325 	if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
4326 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
4327 	else
4328 		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
4329 
4330 	err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
4331 	if (err)
4332 		goto err_insert;
4333 
4334 	return entry;
4335 
4336 err_insert:
4337 	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
4338 end:
4339 	kfree(entry);
4340 	return ERR_PTR(err);
4341 }
4342 
4343 static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
4344 	struct uverbs_attr_bundle *attrs)
4345 {
4346 	struct ib_uobject *uobj = uverbs_attr_get_uobject(
4347 		attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
4348 	enum mlx5_ib_uapi_uar_alloc_type alloc_type;
4349 	struct mlx5_ib_ucontext *c;
4350 	struct mlx5_user_mmap_entry *entry;
4351 	u64 mmap_offset;
4352 	u32 length;
4353 	int err;
4354 
4355 	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
4356 	if (IS_ERR(c))
4357 		return PTR_ERR(c);
4358 
4359 	err = uverbs_get_const(&alloc_type, attrs,
4360 			       MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
4361 	if (err)
4362 		return err;
4363 
4364 	if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
4365 	    alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
4366 		return -EOPNOTSUPP;
4367 
4368 	if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) &&
4369 	    alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
4370 		return -EOPNOTSUPP;
4371 
4372 	entry = alloc_uar_entry(c, alloc_type);
4373 	if (IS_ERR(entry))
4374 		return PTR_ERR(entry);
4375 
4376 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
4377 	length = entry->rdma_entry.npages * PAGE_SIZE;
4378 	uobj->object = entry;
4379 	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
4380 
4381 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
4382 			     &mmap_offset, sizeof(mmap_offset));
4383 	if (err)
4384 		return err;
4385 
4386 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
4387 			     &entry->page_idx, sizeof(entry->page_idx));
4388 	if (err)
4389 		return err;
4390 
4391 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
4392 			     &length, sizeof(length));
4393 	return err;
4394 }
4395 
4396 DECLARE_UVERBS_NAMED_METHOD(
4397 	MLX5_IB_METHOD_UAR_OBJ_ALLOC,
4398 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
4399 			MLX5_IB_OBJECT_UAR,
4400 			UVERBS_ACCESS_NEW,
4401 			UA_MANDATORY),
4402 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
4403 			     enum mlx5_ib_uapi_uar_alloc_type,
4404 			     UA_MANDATORY),
4405 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
4406 			   UVERBS_ATTR_TYPE(u32),
4407 			   UA_MANDATORY),
4408 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
4409 			   UVERBS_ATTR_TYPE(u32),
4410 			   UA_MANDATORY),
4411 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
4412 			    UVERBS_ATTR_TYPE(u64),
4413 			    UA_MANDATORY));
4414 
4415 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
4416 	MLX5_IB_METHOD_UAR_OBJ_DESTROY,
4417 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
4418 			MLX5_IB_OBJECT_UAR,
4419 			UVERBS_ACCESS_DESTROY,
4420 			UA_MANDATORY));
4421 
4422 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
4423 			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
4424 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
4425 			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
4426 
4427 ADD_UVERBS_ATTRIBUTES_SIMPLE(
4428 	mlx5_ib_query_context,
4429 	UVERBS_OBJECT_DEVICE,
4430 	UVERBS_METHOD_QUERY_CONTEXT,
4431 	UVERBS_ATTR_PTR_OUT(
4432 		MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
4433 		UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
4434 				   dump_fill_mkey),
4435 		UA_MANDATORY));
4436 
4437 ADD_UVERBS_ATTRIBUTES_SIMPLE(
4438 	mlx5_ib_reg_dmabuf_mr,
4439 	UVERBS_OBJECT_MR,
4440 	UVERBS_METHOD_REG_DMABUF_MR,
4441 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
4442 			     enum mlx5_ib_uapi_reg_dmabuf_flags,
4443 			     UA_OPTIONAL));
4444 
4445 static const struct uapi_definition mlx5_ib_defs[] = {
4446 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
4447 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
4448 	UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
4449 	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
4450 	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
4451 	UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
4452 
4453 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
4454 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr),
4455 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
4456 				UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
4457 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
4458 	{}
4459 };
4460 
4461 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
4462 {
4463 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
4464 	mlx5_ib_data_direct_cleanup(dev);
4465 	mlx5_ib_cleanup_multiport_master(dev);
4466 	WARN_ON(!xa_empty(&dev->odp_mkeys));
4467 	mutex_destroy(&dev->cap_mask_mutex);
4468 	WARN_ON(!xa_empty(&dev->sig_mrs));
4469 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
4470 	mlx5r_macsec_dealloc_gids(dev);
4471 }
4472 
4473 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
4474 {
4475 	struct mlx5_core_dev *mdev = dev->mdev;
4476 	int err, i;
4477 
4478 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
4479 	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
4480 	dev->ib_dev.dev.parent = mdev->device;
4481 	dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
4482 
4483 	for (i = 0; i < dev->num_ports; i++) {
4484 		spin_lock_init(&dev->port[i].mp.mpi_lock);
4485 		dev->port[i].roce.dev = dev;
4486 		dev->port[i].roce.native_port_num = i + 1;
4487 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
4488 	}
4489 
4490 	err = mlx5r_cmd_query_special_mkeys(dev);
4491 	if (err)
4492 		return err;
4493 
4494 	err = mlx5r_macsec_init_gids_and_devlist(dev);
4495 	if (err)
4496 		return err;
4497 
4498 	err = mlx5_ib_init_multiport_master(dev);
4499 	if (err)
4500 		goto err;
4501 
4502 	err = set_has_smi_cap(dev);
4503 	if (err)
4504 		goto err_mp;
4505 
4506 	err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
4507 	if (err)
4508 		goto err_mp;
4509 
4510 	if (mlx5_use_mad_ifc(dev))
4511 		get_ext_port_caps(dev);
4512 
4513 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_max(mdev);
4514 
4515 	mutex_init(&dev->cap_mask_mutex);
4516 	mutex_init(&dev->data_direct_lock);
4517 	INIT_LIST_HEAD(&dev->qp_list);
4518 	spin_lock_init(&dev->reset_flow_resource_lock);
4519 	xa_init(&dev->odp_mkeys);
4520 	xa_init(&dev->sig_mrs);
4521 	atomic_set(&dev->mkey_var, 0);
4522 
4523 	spin_lock_init(&dev->dm.lock);
4524 	dev->dm.dev = mdev;
4525 	err = mlx5_ib_data_direct_init(dev);
4526 	if (err)
4527 		goto err_mp;
4528 
4529 	err = pcim_p2pdma_init(mdev->pdev);
4530 	if (err && err != -EOPNOTSUPP)
4531 		goto err_dd;
4532 
4533 	mlx5_cmd_init_async_ctx(mdev, &dev->async_ctx);
4534 
4535 	return 0;
4536 err_dd:
4537 	mlx5_ib_data_direct_cleanup(dev);
4538 err_mp:
4539 	mlx5_ib_cleanup_multiport_master(dev);
4540 err:
4541 	mlx5r_macsec_dealloc_gids(dev);
4542 	return err;
4543 }
4544 
4545 static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
4546 					     enum rdma_nl_dev_type type,
4547 					     const char *name);
4548 static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev);
4549 
4550 static const struct ib_device_ops mlx5_ib_dev_ops = {
4551 	.owner = THIS_MODULE,
4552 	.driver_id = RDMA_DRIVER_MLX5,
4553 	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,
4554 
4555 	.add_gid = mlx5_ib_add_gid,
4556 	.add_sub_dev = mlx5_ib_add_sub_dev,
4557 	.alloc_mr = mlx5_ib_alloc_mr,
4558 	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
4559 	.alloc_pd = mlx5_ib_alloc_pd,
4560 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
4561 	.attach_mcast = mlx5_ib_mcg_attach,
4562 	.check_mr_status = mlx5_ib_check_mr_status,
4563 	.create_ah = mlx5_ib_create_ah,
4564 	.create_cq = mlx5_ib_create_cq,
4565 	.create_user_cq = mlx5_ib_create_user_cq,
4566 	.create_qp = mlx5_ib_create_qp,
4567 	.create_srq = mlx5_ib_create_srq,
4568 	.create_user_ah = mlx5_ib_create_ah,
4569 	.dealloc_pd = mlx5_ib_dealloc_pd,
4570 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
4571 	.del_gid = mlx5_ib_del_gid,
4572 	.del_sub_dev = mlx5_ib_del_sub_dev,
4573 	.dereg_mr = mlx5_ib_dereg_mr,
4574 	.destroy_ah = mlx5_ib_destroy_ah,
4575 	.destroy_cq = mlx5_ib_destroy_cq,
4576 	.destroy_qp = mlx5_ib_destroy_qp,
4577 	.destroy_srq = mlx5_ib_destroy_srq,
4578 	.detach_mcast = mlx5_ib_mcg_detach,
4579 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
4580 	.drain_rq = mlx5_ib_drain_rq,
4581 	.drain_sq = mlx5_ib_drain_sq,
4582 	.device_group = &mlx5_attr_group,
4583 	.get_dev_fw_str = get_dev_fw_str,
4584 	.get_dma_mr = mlx5_ib_get_dma_mr,
4585 	.get_link_layer = mlx5_ib_port_link_layer,
4586 	.map_mr_sg = mlx5_ib_map_mr_sg,
4587 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
4588 	.mmap = mlx5_ib_mmap,
4589 	.mmap_free = mlx5_ib_mmap_free,
4590 	.mmap_get_pfns = mlx5_ib_mmap_get_pfns,
4591 	.modify_cq = mlx5_ib_modify_cq,
4592 	.modify_device = mlx5_ib_modify_device,
4593 	.modify_port = mlx5_ib_modify_port,
4594 	.modify_qp = mlx5_ib_modify_qp,
4595 	.modify_srq = mlx5_ib_modify_srq,
4596 	.pgoff_to_mmap_entry = mlx5_ib_pgoff_to_mmap_entry,
4597 	.pre_destroy_cq = mlx5_ib_pre_destroy_cq,
4598 	.poll_cq = mlx5_ib_poll_cq,
4599 	.post_destroy_cq = mlx5_ib_post_destroy_cq,
4600 	.post_recv = mlx5_ib_post_recv_nodrain,
4601 	.post_send = mlx5_ib_post_send_nodrain,
4602 	.post_srq_recv = mlx5_ib_post_srq_recv,
4603 	.process_mad = mlx5_ib_process_mad,
4604 	.query_ah = mlx5_ib_query_ah,
4605 	.query_device = mlx5_ib_query_device,
4606 	.query_gid = mlx5_ib_query_gid,
4607 	.query_pkey = mlx5_ib_query_pkey,
4608 	.query_port_speed = mlx5_ib_query_port_speed,
4609 	.query_qp = mlx5_ib_query_qp,
4610 	.query_srq = mlx5_ib_query_srq,
4611 	.query_ucontext = mlx5_ib_query_ucontext,
4612 	.reg_user_mr = mlx5_ib_reg_user_mr,
4613 	.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
4614 	.req_notify_cq = mlx5_ib_arm_cq,
4615 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
4616 	.resize_user_cq = mlx5_ib_resize_cq,
4617 	.ufile_hw_cleanup = mlx5_ib_ufile_hw_cleanup,
4618 
4619 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
4620 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
4621 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
4622 	INIT_RDMA_OBJ_SIZE(ib_dmah, mlx5_ib_dmah, ibdmah),
4623 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
4624 	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
4625 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
4626 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
4627 };
4628 
4629 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
4630 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
4631 };
4632 
4633 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
4634 	.get_vf_config = mlx5_ib_get_vf_config,
4635 	.get_vf_guid = mlx5_ib_get_vf_guid,
4636 	.get_vf_stats = mlx5_ib_get_vf_stats,
4637 	.set_vf_guid = mlx5_ib_set_vf_guid,
4638 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
4639 };
4640 
4641 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
4642 	.alloc_mw = mlx5_ib_alloc_mw,
4643 	.dealloc_mw = mlx5_ib_dealloc_mw,
4644 
4645 	INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
4646 };
4647 
4648 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
4649 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
4650 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
4651 
4652 	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
4653 };
4654 
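/*
 * The VAR doorbell region geometry comes from the VDPA emulation caps:
 * bar_size = 2^log_doorbell_bar_size * 4K, stride =
 * 2^log_doorbell_stride, and the bitmap tracks bar_size / stride
 * allocatable entries.
 */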
4655 static int mlx5_ib_init_var_region(struct mlx5_ib_dev *dev)
4656 {
4657 	struct mlx5_var_region *var_region = &dev->var_table.var_region;
4658 	struct mlx5_core_dev *mdev = dev->mdev;
4659 	u8 log_doorbell_bar_size;
4660 	u8 log_doorbell_stride;
4661 	u64 bar_size;
4662 
4663 	log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
4664 					log_doorbell_bar_size);
4665 	log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
4666 					log_doorbell_stride);
4667 	var_region->hw_start_addr = dev->mdev->bar_addr +
4668 				MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
4669 					doorbell_bar_offset);
4670 	bar_size = (1ULL << log_doorbell_bar_size) * 4096;
4671 	var_region->stride_size = 1ULL << log_doorbell_stride;
4672 	var_region->num_var_hw_entries = div_u64(bar_size,
4673 						 var_region->stride_size);
4674 	mutex_init(&var_region->bitmap_lock);
4675 	var_region->bitmap = bitmap_zalloc(var_region->num_var_hw_entries,
4676 					   GFP_KERNEL);
4677 	return (var_region->bitmap) ? 0 : -ENOMEM;
4678 }
4679 
4680 static int mlx5_ib_init_tlp_var_region(struct mlx5_ib_dev *dev)
4681 {
4682 	struct mlx5_var_region *var_region = &dev->var_table.tlp_var_region;
4683 	struct mlx5_core_dev *mdev = dev->mdev;
4684 	u8 log_tlp_var_stride;
4685 
4686 	log_tlp_var_stride =
4687 		MLX5_CAP_DEV_TLP_EMULATION(mdev, log_tlp_rsp_gw_page_stride);
4688 	var_region->hw_start_addr =
4689 		dev->mdev->bar_addr +
4690 		MLX5_CAP64_DEV_TLP_EMULATION(mdev, tlp_rsp_gw_pages_bar_offset);
4691 
4692 	var_region->stride_size = (1ULL << log_tlp_var_stride) * 4096;
4693 	var_region->num_var_hw_entries =
4694 		MLX5_CAP_DEV_TLP_EMULATION(mdev, tlp_rsp_gw_num_pages);
4695 
4696 	mutex_init(&var_region->bitmap_lock);
4697 	var_region->bitmap = bitmap_zalloc(var_region->num_var_hw_entries,
4698 					   GFP_KERNEL);
4699 	return (var_region->bitmap) ? 0 : -ENOMEM;
4700 }
4701 
4702 static void mlx5_ib_cleanup_ucaps(struct mlx5_ib_dev *dev)
4703 {
4704 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
4705 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4706 
4707 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
4708 	    MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
4709 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
4710 }
4711 
4712 static int mlx5_ib_init_ucaps(struct mlx5_ib_dev *dev)
4713 {
4714 	int ret;
4715 
4716 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL) {
4717 		ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4718 		if (ret)
4719 			return ret;
4720 	}
4721 
4722 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
4723 	    MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA) {
4724 		ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
4725 		if (ret)
4726 			goto remove_local;
4727 	}
4728 
4729 	return 0;
4730 
4731 remove_local:
4732 	if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
4733 		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
4734 	return ret;
4735 }
4736 
4737 static void mlx5_ib_cleanup_var_table(struct mlx5_ib_dev *dev)
4738 {
4739 	bitmap_free(dev->var_table.var_region.bitmap);
4740 	bitmap_free(dev->var_table.tlp_var_region.bitmap);
4741 }
4742 
4743 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
4744 {
4745 	if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
4746 	    MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL)
4747 		mlx5_ib_cleanup_ucaps(dev);
4748 
4749 	mlx5_ib_cleanup_var_table(dev);
4750 }
4751 
4752 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
4753 {
4754 	struct mlx5_core_dev *mdev = dev->mdev;
4755 	int err;
4756 
4757 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
4758 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
4759 		ib_set_device_ops(&dev->ib_dev,
4760 				  &mlx5_ib_dev_ipoib_enhanced_ops);
4761 
4762 	if (mlx5_core_is_pf(mdev))
4763 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
4764 
4765 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
4766 
4767 	if (MLX5_CAP_GEN(mdev, imaicl))
4768 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
4769 
4770 	if (MLX5_CAP_GEN(mdev, xrc))
4771 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
4772 
4773 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
4774 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4775 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
4776 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
4777 
4778 	if (mdev->st)
4779 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dmah_ops);
4780 
4781 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
4782 
4783 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
4784 		dev->ib_dev.driver_def = mlx5_ib_defs;
4785 
4786 	err = init_node_data(dev);
4787 	if (err)
4788 		return err;
4789 
4790 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4791 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
4792 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
4793 		mutex_init(&dev->lb.mutex);
4794 
4795 	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
4796 			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
4797 		err = mlx5_ib_init_var_region(dev);
4798 		if (err)
4799 			return err;
4800 	}
4801 
4802 	if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
4803 	    MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL) {
4804 		err = mlx5_ib_init_ucaps(dev);
4805 		if (err)
4806 			goto err_ucaps;
4807 	}
4808 
4809 	if (MLX5_CAP_GEN(dev->mdev, tlp_device_emulation_manager)) {
4810 		err = mlx5_ib_init_tlp_var_region(dev);
4811 		if (err)
4812 			goto err_tlp_var;
4813 	}
4814 
4815 	dev->ib_dev.use_cq_dim = true;
4816 
4817 	return 0;
4818 
4819 err_tlp_var:
4820 	mlx5_ib_cleanup_ucaps(dev);
4821 err_ucaps:
4822 	bitmap_free(dev->var_table.var_region.bitmap);
4823 	return err;
4824 }
4825 
4826 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
4827 	.get_port_immutable = mlx5_port_immutable,
4828 	.query_port = mlx5_ib_query_port,
4829 };
4830 
4831 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
4832 {
4833 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
4834 	return 0;
4835 }
4836 
4837 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
4838 	.get_port_immutable = mlx5_port_rep_immutable,
4839 	.query_port = mlx5_ib_rep_query_port,
4840 	.query_pkey = mlx5_ib_rep_query_pkey,
4841 };
4842 
4843 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
4844 {
4845 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
4846 	return 0;
4847 }
4848 
4849 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
4850 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
4851 	.create_wq = mlx5_ib_create_wq,
4852 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
4853 	.destroy_wq = mlx5_ib_destroy_wq,
4854 	.modify_wq = mlx5_ib_modify_wq,
4855 
4856 	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
4857 			   ib_rwq_ind_tbl),
4858 };
4859 
4860 static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
4861 {
4862 	struct mlx5_core_dev *mdev = dev->mdev;
4863 	enum rdma_link_layer ll;
4864 	int port_type_cap;
4865 	u32 port_num = 0;
4866 	int err;
4867 
4868 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4869 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4870 
4871 	if (ll == IB_LINK_LAYER_ETHERNET) {
4872 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
4873 
4874 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4875 
4876 		/* Register only for native ports */
4877 		mlx5_mdev_netdev_track(dev, port_num);
4878 
4879 		err = mlx5_enable_eth(dev);
4880 		if (err)
4881 			goto cleanup;
4882 	}
4883 
4884 	return 0;
4885 cleanup:
4886 	mlx5_mdev_netdev_untrack(dev, port_num);
4887 	return err;
4888 }
4889 
4890 static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
4891 {
4892 	struct mlx5_core_dev *mdev = dev->mdev;
4893 	enum rdma_link_layer ll;
4894 	int port_type_cap;
4895 	u32 port_num;
4896 
4897 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4898 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4899 
4900 	if (ll == IB_LINK_LAYER_ETHERNET) {
4901 		mlx5_disable_eth(dev);
4902 
4903 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4904 		mlx5_mdev_netdev_untrack(dev, port_num);
4905 	}
4906 }
4907 
4908 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
4909 {
4910 	mlx5_ib_init_cong_debugfs(dev,
4911 				  mlx5_core_native_port_num(dev->mdev) - 1);
4912 	return 0;
4913 }
4914 
4915 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
4916 {
4917 	mlx5_ib_cleanup_cong_debugfs(dev,
4918 				     mlx5_core_native_port_num(dev->mdev) - 1);
4919 }
4920 
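/*
 * Allocate the device's two blue-flame registers: a regular one and a
 * fast-path one (the boolean arguments select write-combining mapping
 * and fast-path, respectively).
 */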
4921 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
4922 {
4923 	int err;
4924 
4925 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4926 	if (err)
4927 		return err;
4928 
4929 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4930 	if (err)
4931 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4932 
4933 	return err;
4934 }
4935 
4936 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4937 {
4938 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4939 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4940 }
4941 
4942 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4943 {
4944 	const char *name;
4945 
4946 	if (dev->sub_dev_name) {
4947 		name = dev->sub_dev_name;
4948 		ib_mark_name_assigned_by_user(&dev->ib_dev);
4949 	} else if (!mlx5_lag_is_active(dev->mdev))
4950 		name = "mlx5_%d";
4951 	else
4952 		name = "mlx5_bond_%d";
4953 	return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4954 }
4955 
4956 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4957 {
4958 	mlx5r_frmr_pools_cleanup(&dev->ib_dev);
4959 	mlx5r_umr_resource_cleanup(dev);
4960 	mlx5r_umr_cleanup(dev);
4961 }
4962 
4963 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4964 {
4965 	ib_unregister_device(&dev->ib_dev);
4966 }
4967 
4968 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4969 {
4970 	int ret;
4971 
4972 	ret = mlx5r_umr_init(dev);
4973 	if (ret)
4974 		return ret;
4975 
4976 	ret = mlx5r_frmr_pools_init(&dev->ib_dev);
4977 	if (ret)
4978 		mlx5_ib_warn(dev, "frmr pools init failed %d\n", ret);
4979 
4980 	return ret;
4981 }
4982 
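/*
 * Delay-drop lets RQs delay packet drops while no receive WQEs are
 * posted.  When supported, initialize its state and, if a debugfs root
 * exists, expose the timeout and event/RQ counters there.
 */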
4983 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4984 {
4985 	struct dentry *root;
4986 
4987 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4988 		return 0;
4989 
4990 	mutex_init(&dev->delay_drop.lock);
4991 	dev->delay_drop.dev = dev;
4992 	dev->delay_drop.activate = false;
4993 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4994 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4995 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
4996 	atomic_set(&dev->delay_drop.events_cnt, 0);
4997 
4998 	if (!mlx5_debugfs_root)
4999 		return 0;
5000 
5001 	root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
5002 	dev->delay_drop.dir_debugfs = root;
5003 
5004 	debugfs_create_atomic_t("num_timeout_events", 0400, root,
5005 				&dev->delay_drop.events_cnt);
5006 	debugfs_create_atomic_t("num_rqs", 0400, root,
5007 				&dev->delay_drop.rqs_cnt);
5008 	debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
5009 			    &fops_delay_drop_timeout);
5010 	return 0;
5011 }
5012 
5013 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
5014 {
5015 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5016 		return;
5017 
5018 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
5019 	if (!dev->delay_drop.dir_debugfs)
5020 		return;
5021 
5022 	debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
5023 	dev->delay_drop.dir_debugfs = NULL;
5024 }
5025 
5026 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
5027 {
5028 	struct mlx5_ib_resources *devr = &dev->devr;
5029 	int port;
5030 
5031 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5032 		INIT_WORK(&devr->ports[port].pkey_change_work,
5033 			  pkey_change_handler);
5034 
5035 	dev->mdev_events.notifier_call = mlx5_ib_event;
5036 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
5037 
5038 	mlx5r_macsec_event_register(dev);
5039 
5040 	return 0;
5041 }
5042 
5043 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
5044 {
5045 	struct mlx5_ib_resources *devr = &dev->devr;
5046 	int port;
5047 
5048 	mlx5r_macsec_event_unregister(dev);
5049 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
5050 
5051 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5052 		cancel_work_sync(&devr->ports[port].pkey_change_work);
5053 }
5054 
5055 void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
5056 			      struct mlx5_data_direct_dev *dev)
5057 {
5058 	mutex_lock(&ibdev->data_direct_lock);
5059 	ibdev->data_direct_dev = dev;
5060 	mutex_unlock(&ibdev->data_direct_lock);
5061 }
5062 
5063 void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
5064 {
5065 	mutex_lock(&ibdev->data_direct_lock);
5066 	mlx5_ib_revoke_data_direct_mrs(ibdev);
5067 	ibdev->data_direct_dev = NULL;
5068 	mutex_unlock(&ibdev->data_direct_lock);
5069 }
5070 
5071 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
5072 		      const struct mlx5_ib_profile *profile,
5073 		      int stage)
5074 {
5075 	dev->ib_active = false;
5076 
5077 	/* 'stage' is the number of stages that were initialized and need cleanup */
5078 	while (stage) {
5079 		stage--;
5080 		if (profile->stage[stage].cleanup)
5081 			profile->stage[stage].cleanup(dev);
5082 	}
5083 
5084 	kfree(dev->port);
5085 	ib_dealloc_device(&dev->ib_dev);
5086 }
5087 
5088 int __mlx5_ib_add(struct mlx5_ib_dev *dev,
5089 		  const struct mlx5_ib_profile *profile)
5090 {
5091 	int err;
5092 	int i;
5093 
5094 	dev->profile = profile;
5095 
5096 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
5097 		if (profile->stage[i].init) {
5098 			err = profile->stage[i].init(dev);
5099 			if (err)
5100 				goto err_out;
5101 		}
5102 	}
5103 
5104 	dev->ib_active = true;
5105 	return 0;
5106 
5107 err_out:
5108 	/* Clean up stages which were initialized */
5109 	while (i) {
5110 		i--;
5111 		if (profile->stage[i].cleanup)
5112 			profile->stage[i].cleanup(dev);
5113 	}
5114 	return err;
5115 }
5116 
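/*
 * A profile is an ordered table of init/cleanup stage pairs.
 * __mlx5_ib_add() runs the init hooks in order; on failure it unwinds
 * through the cleanup hooks of the stages already brought up, and
 * __mlx5_ib_remove() does the same from a given stage downwards.
 */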
5117 static const struct mlx5_ib_profile pf_profile = {
5118 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5119 		     mlx5_ib_stage_init_init,
5120 		     mlx5_ib_stage_init_cleanup),
5121 	STAGE_CREATE(MLX5_IB_STAGE_FS,
5122 		     mlx5_ib_fs_init,
5123 		     mlx5_ib_fs_cleanup),
5124 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5125 		     mlx5_ib_stage_caps_init,
5126 		     mlx5_ib_stage_caps_cleanup),
5127 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5128 		     mlx5_ib_stage_non_default_cb,
5129 		     NULL),
5130 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5131 		     mlx5_ib_roce_init,
5132 		     mlx5_ib_roce_cleanup),
5133 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5134 		     mlx5_init_qp_table,
5135 		     mlx5_cleanup_qp_table),
5136 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5137 		     mlx5_init_srq_table,
5138 		     mlx5_cleanup_srq_table),
5139 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5140 		     mlx5_ib_dev_res_init,
5141 		     mlx5_ib_dev_res_cleanup),
5142 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
5143 		     mlx5_ib_odp_init_one,
5144 		     mlx5_ib_odp_cleanup_one),
5145 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
5146 		     mlx5_ib_counters_init,
5147 		     mlx5_ib_counters_cleanup),
5148 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
5149 		     mlx5_ib_stage_cong_debugfs_init,
5150 		     mlx5_ib_stage_cong_debugfs_cleanup),
5151 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5152 		     mlx5_ib_stage_bfrag_init,
5153 		     mlx5_ib_stage_bfrag_cleanup),
5154 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5155 		     NULL,
5156 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5157 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
5158 		     mlx5_ib_devx_init,
5159 		     mlx5_ib_devx_cleanup),
5160 	STAGE_CREATE(MLX5_IB_STAGE_SYS_ERROR_NOTIFIER,
5161 		     mlx5_ib_stage_sys_error_notifier_init,
5162 		     mlx5_ib_stage_sys_error_notifier_cleanup),
5163 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5164 		     mlx5_ib_stage_ib_reg_init,
5165 		     mlx5_ib_stage_ib_reg_cleanup),
5166 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
5167 		     mlx5_ib_stage_dev_notifier_init,
5168 		     mlx5_ib_stage_dev_notifier_cleanup),
5169 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5170 		     mlx5_ib_stage_post_ib_reg_umr_init,
5171 		     NULL),
5172 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5173 		     mlx5_ib_stage_delay_drop_init,
5174 		     mlx5_ib_stage_delay_drop_cleanup),
5175 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
5176 		     mlx5_ib_restrack_init,
5177 		     NULL),
5178 };
5179 
5180 const struct mlx5_ib_profile raw_eth_profile = {
5181 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5182 		     mlx5_ib_stage_init_init,
5183 		     mlx5_ib_stage_init_cleanup),
5184 	STAGE_CREATE(MLX5_IB_STAGE_FS,
5185 		     mlx5_ib_fs_init,
5186 		     mlx5_ib_fs_cleanup),
5187 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5188 		     mlx5_ib_stage_caps_init,
5189 		     mlx5_ib_stage_caps_cleanup),
5190 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5191 		     mlx5_ib_stage_raw_eth_non_default_cb,
5192 		     NULL),
5193 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5194 		     mlx5_ib_roce_init,
5195 		     mlx5_ib_roce_cleanup),
5196 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5197 		     mlx5_init_qp_table,
5198 		     mlx5_cleanup_qp_table),
5199 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5200 		     mlx5_init_srq_table,
5201 		     mlx5_cleanup_srq_table),
5202 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5203 		     mlx5_ib_dev_res_init,
5204 		     mlx5_ib_dev_res_cleanup),
5205 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
5206 		     mlx5_ib_counters_init,
5207 		     mlx5_ib_counters_cleanup),
5208 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
5209 		     mlx5_ib_stage_cong_debugfs_init,
5210 		     mlx5_ib_stage_cong_debugfs_cleanup),
5211 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5212 		     mlx5_ib_stage_bfrag_init,
5213 		     mlx5_ib_stage_bfrag_cleanup),
5214 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5215 		     NULL,
5216 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5217 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
5218 		     mlx5_ib_devx_init,
5219 		     mlx5_ib_devx_cleanup),
5220 	STAGE_CREATE(MLX5_IB_STAGE_SYS_ERROR_NOTIFIER,
5221 		     mlx5_ib_stage_sys_error_notifier_init,
5222 		     mlx5_ib_stage_sys_error_notifier_cleanup),
5223 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5224 		     mlx5_ib_stage_ib_reg_init,
5225 		     mlx5_ib_stage_ib_reg_cleanup),
5226 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
5227 		     mlx5_ib_stage_dev_notifier_init,
5228 		     mlx5_ib_stage_dev_notifier_cleanup),
5229 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5230 		     mlx5_ib_stage_post_ib_reg_umr_init,
5231 		     NULL),
5232 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5233 		     mlx5_ib_stage_delay_drop_init,
5234 		     mlx5_ib_stage_delay_drop_cleanup),
5235 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
5236 		     mlx5_ib_restrack_init,
5237 		     NULL),
5238 };
5239 
5240 static const struct mlx5_ib_profile plane_profile = {
5241 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
5242 		     mlx5_ib_stage_init_init,
5243 		     mlx5_ib_stage_init_cleanup),
5244 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5245 		     mlx5_ib_stage_caps_init,
5246 		     mlx5_ib_stage_caps_cleanup),
5247 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5248 		     mlx5_ib_stage_non_default_cb,
5249 		     NULL),
5250 	STAGE_CREATE(MLX5_IB_STAGE_QP,
5251 		     mlx5_init_qp_table,
5252 		     mlx5_cleanup_qp_table),
5253 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
5254 		     mlx5_init_srq_table,
5255 		     mlx5_cleanup_srq_table),
5256 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5257 		     mlx5_ib_dev_res_init,
5258 		     mlx5_ib_dev_res_cleanup),
5259 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5260 		     mlx5_ib_stage_bfrag_init,
5261 		     mlx5_ib_stage_bfrag_cleanup),
5262 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5263 		     mlx5_ib_stage_ib_reg_init,
5264 		     mlx5_ib_stage_ib_reg_cleanup),
5265 };
5266 
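/*
 * Create the SMI sub-device used with multi-plane IB ports.  Only one
 * such sub-device may exist per parent; it shares the parent's mdev and
 * exposes the planes as its ports.
 */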
5267 static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
5268 					     enum rdma_nl_dev_type type,
5269 					     const char *name)
5270 {
5271 	struct mlx5_ib_dev *mparent = to_mdev(parent), *mplane;
5272 	enum rdma_link_layer ll;
5273 	int ret;
5274 
5275 	if (mparent->smi_dev)
5276 		return ERR_PTR(-EEXIST);
5277 
5278 	ll = mlx5_port_type_cap_to_rdma_ll(MLX5_CAP_GEN(mparent->mdev,
5279 							port_type));
5280 	if (type != RDMA_DEVICE_TYPE_SMI || !mparent->num_plane ||
5281 	    ll != IB_LINK_LAYER_INFINIBAND ||
5282 	    !MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
5283 		return ERR_PTR(-EOPNOTSUPP);
5284 
5285 	mplane = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
5286 					  mlx5_core_net(mparent->mdev));
5287 	if (!mplane)
5288 		return ERR_PTR(-ENOMEM);
5289 
5290 	mplane->port = kzalloc_objs(*mplane->port,
5291 				    mparent->num_plane * mparent->num_ports);
5292 	if (!mplane->port) {
5293 		ret = -ENOMEM;
5294 		goto fail_kcalloc;
5295 	}
5296 
5297 	mplane->ib_dev.type = type;
5298 	mplane->mdev = mparent->mdev;
5299 	mplane->num_ports = mparent->num_plane;
5300 	mplane->sub_dev_name = name;
5301 	mplane->ib_dev.phys_port_cnt = mplane->num_ports;
5302 
5303 	ret = __mlx5_ib_add(mplane, &plane_profile);
5304 	if (ret)
5305 		goto fail_ib_add;
5306 
5307 	mparent->smi_dev = mplane;
5308 	return &mplane->ib_dev;
5309 
5310 fail_ib_add:
5311 	kfree(mplane->port);
5312 fail_kcalloc:
5313 	ib_dealloc_device(&mplane->ib_dev);
5314 	return ERR_PTR(ret);
5315 }
5316 
5317 static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev)
5318 {
5319 	struct mlx5_ib_dev *mdev = to_mdev(sub_dev);
5320 
5321 	to_mdev(sub_dev->parent)->smi_dev = NULL;
5322 	__mlx5_ib_remove(mdev, mdev->profile, MLX5_IB_STAGE_MAX);
5323 }
5324 
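/*
 * Auxiliary driver for the non-native ports of a multiport HCA: try to
 * bind the new port to an existing IB device with a matching
 * sys_image_guid, otherwise park it on the unaffiliated list until a
 * master device appears.
 */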
5325 static int mlx5r_mp_probe(struct auxiliary_device *adev,
5326 			  const struct auxiliary_device_id *id)
5327 {
5328 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
5329 	struct mlx5_core_dev *mdev = idev->mdev;
5330 	struct mlx5_ib_multiport_info *mpi;
5331 	struct mlx5_ib_dev *dev;
5332 	bool bound = false;
5333 	int err;
5334 
5335 	mpi = kzalloc_obj(*mpi);
5336 	if (!mpi)
5337 		return -ENOMEM;
5338 
5339 	mpi->mdev = mdev;
5340 	err = mlx5_query_nic_vport_system_image_guid(mdev,
5341 						     &mpi->sys_image_guid);
5342 	if (err) {
5343 		kfree(mpi);
5344 		return err;
5345 	}
5346 
5347 	mutex_lock(&mlx5_ib_multiport_mutex);
5348 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
5349 		if (dev->sys_image_guid == mpi->sys_image_guid &&
5350 		    mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
5351 			bound = mlx5_ib_bind_slave_port(dev, mpi);
5352 
5353 		if (bound) {
5354 			rdma_roce_rescan_device(&dev->ib_dev);
5355 			mpi->ibdev->ib_active = true;
5356 			break;
5357 		}
5358 	}
5359 
5360 	if (!bound) {
5361 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5362 		dev_dbg(mdev->device,
5363 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
5364 	}
5365 	mutex_unlock(&mlx5_ib_multiport_mutex);
5366 
5367 	auxiliary_set_drvdata(adev, mpi);
5368 	return 0;
5369 }
5370 
5371 static void mlx5r_mp_remove(struct auxiliary_device *adev)
5372 {
5373 	struct mlx5_ib_multiport_info *mpi;
5374 
5375 	mpi = auxiliary_get_drvdata(adev);
5376 	mutex_lock(&mlx5_ib_multiport_mutex);
5377 	if (mpi->ibdev)
5378 		mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
5379 	else
5380 		list_del(&mpi->list);
5381 	mutex_unlock(&mlx5_ib_multiport_mutex);
5382 	kfree(mpi);
5383 }
5384 
5385 static int mlx5r_probe(struct auxiliary_device *adev,
5386 		       const struct auxiliary_device_id *id)
5387 {
5388 	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
5389 	struct mlx5_core_dev *mdev = idev->mdev;
5390 	const struct mlx5_ib_profile *profile;
5391 	int port_type_cap, num_ports, ret;
5392 	enum rdma_link_layer ll;
5393 	struct mlx5_ib_dev *dev;
5394 
5395 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5396 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5397 
5398 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
5399 			MLX5_CAP_GEN(mdev, num_vhca_ports));
5400 	dev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
5401 				       mlx5_core_net(mdev));
5402 	if (!dev)
5403 		return -ENOMEM;
5404 
5405 	if (ll == IB_LINK_LAYER_INFINIBAND) {
5406 		ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane);
5407 		if (ret)
5408 			goto fail;
5409 	}
5410 
5411 	dev->port = kzalloc_objs(*dev->port, num_ports);
5412 	if (!dev->port) {
5413 		ret = -ENOMEM;
5414 		goto fail;
5415 	}
5416 
5417 	dev->mdev = mdev;
5418 	dev->num_ports = num_ports;
5419 	dev->ib_dev.phys_port_cnt = num_ports;
5420 
5421 	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
5422 		profile = &raw_eth_profile;
5423 	else
5424 		profile = &pf_profile;
5425 
5426 	ret = __mlx5_ib_add(dev, profile);
5427 	if (ret)
5428 		goto fail_ib_add;
5429 
5430 	auxiliary_set_drvdata(adev, dev);
5431 	return 0;
5432 
5433 fail_ib_add:
5434 	kfree(dev->port);
5435 fail:
5436 	ib_dealloc_device(&dev->ib_dev);
5437 	return ret;
5438 }
5439 
5440 static void mlx5r_remove(struct auxiliary_device *adev)
5441 {
5442 	struct mlx5_ib_dev *dev;
5443 
5444 	dev = auxiliary_get_drvdata(adev);
5445 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
5446 }
5447 
5448 static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
5449 	{ .name = MLX5_ADEV_NAME ".multiport", },
5450 	{},
5451 };
5452 
5453 static const struct auxiliary_device_id mlx5r_id_table[] = {
5454 	{ .name = MLX5_ADEV_NAME ".rdma", },
5455 	{},
5456 };
5457 
5458 MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
5459 MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
5460 
5461 static struct auxiliary_driver mlx5r_mp_driver = {
5462 	.name = "multiport",
5463 	.probe = mlx5r_mp_probe,
5464 	.remove = mlx5r_mp_remove,
5465 	.id_table = mlx5r_mp_id_table,
5466 };
5467 
5468 static struct auxiliary_driver mlx5r_driver = {
5469 	.name = "rdma",
5470 	.probe = mlx5r_probe,
5471 	.remove = mlx5r_remove,
5472 	.id_table = mlx5r_id_table,
5473 };
5474 
5475 static int __init mlx5_ib_init(void)
5476 {
5477 	int ret;
5478 
5479 	xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
5480 	if (!xlt_emergency_page)
5481 		return -ENOMEM;
5482 
5483 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
5484 	if (!mlx5_ib_event_wq) {
5485 		free_page((unsigned long)xlt_emergency_page);
5486 		return -ENOMEM;
5487 	}
5488 
5489 	ret = mlx5_ib_qp_event_init();
5490 	if (ret)
5491 		goto qp_event_err;
5492 
5493 	mlx5_ib_odp_init();
5494 	ret = mlx5r_rep_init();
5495 	if (ret)
5496 		goto rep_err;
5497 	ret = mlx5_data_direct_driver_register();
5498 	if (ret)
5499 		goto dd_err;
5500 	ret = auxiliary_driver_register(&mlx5r_mp_driver);
5501 	if (ret)
5502 		goto mp_err;
5503 	ret = auxiliary_driver_register(&mlx5r_driver);
5504 	if (ret)
5505 		goto drv_err;
5506 
5507 	return 0;
5508 
5509 drv_err:
5510 	auxiliary_driver_unregister(&mlx5r_mp_driver);
5511 mp_err:
5512 	mlx5_data_direct_driver_unregister();
5513 dd_err:
5514 	mlx5r_rep_cleanup();
5515 rep_err:
5516 	mlx5_ib_qp_event_cleanup();
5517 qp_event_err:
5518 	destroy_workqueue(mlx5_ib_event_wq);
5519 	free_page((unsigned long)xlt_emergency_page);
5520 	return ret;
5521 }
5522 
5523 static void __exit mlx5_ib_cleanup(void)
5524 {
5525 	mlx5_data_direct_driver_unregister();
5526 	auxiliary_driver_unregister(&mlx5r_driver);
5527 	auxiliary_driver_unregister(&mlx5r_mp_driver);
5528 	mlx5r_rep_cleanup();
5529 
5530 	mlx5_ib_qp_event_cleanup();
5531 	destroy_workqueue(mlx5_ib_event_wq);
5532 	free_page((unsigned long)xlt_emergency_page);
5533 }
5534 
5535 module_init(mlx5_ib_init);
5536 module_exit(mlx5_ib_cleanup);
5537