/* xref: /linux/drivers/infiniband/hw/mlx5/main.c (revision bd0abfa8ca1dab85e9cedbf1988e5b4e53c67584) */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB InfiniBand driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct	work;
	union {
		struct mlx5_ib_dev	      *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool			is_slave;
	unsigned int		event;
	void			*param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists.
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use a static array for xlt_emergency_page because
 * dma_map_single() doesn't work on kernel module memory.
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;
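
/*
 * Sketch of the expected setup (an assumption; the allocation happens
 * outside this section, presumably at module init):
 *
 *	xlt_emergency_page = __get_free_page(GFP_KERNEL);
 *	if (!xlt_emergency_page)
 *		return -ENOMEM;
 *	mutex_init(&xlt_emergency_page_mutex);
 *
 * Module .data/.bss may sit in vmalloc space, which dma_map_single()
 * cannot translate, hence a page from the linear mapping rather than a
 * static array.
 */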

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   u8 *port_num)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port  = &dev->port[i];
		if (!port->rep)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, the IB device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}
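
/*
 * Usage sketch (an assumption; the registration helper lives outside
 * this section): mlx5_netdev_event() is wired up as an ordinary
 * netdevice notifier, roughly:
 *
 *	roce->nb.notifier_call = mlx5_netdev_event;
 *	err = register_netdevice_notifier(&roce->nb);
 *
 * The REGISTER/UNREGISTER arms keep roce->netdev in sync with the
 * underlying netdev, while the CHANGE/UP/DOWN arm translates carrier
 * transitions into IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR dispatches.
 */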

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold() */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master, no need to refcount; it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}
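
/*
 * Callers must balance mlx5_ib_get_native_port_mdev() with
 * mlx5_ib_put_native_port_mdev() on every path that got a non-NULL mdev,
 * or a slave port can never unaffiliate. A minimal caller sketch:
 *
 *	u8 native_port_num;
 *	struct mlx5_core_dev *mdev;
 *
 *	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num,
 *					    &native_port_num);
 *	if (!mdev)
 *		return -ENODEV;
 *	... use mdev ...
 *	mlx5_ib_put_native_port_mdev(ibdev, port_num);
 */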

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					   u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}
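
/*
 * The lane arithmetic behind the tables above: the reported IB rate is
 * active_width lanes times the per-lane speed (SDR = 2.5, DDR = 5,
 * QDR = 10, FDR = 14, EDR = 25, HDR = 50 Gb/s). For example,
 * 100GBASE_CR4 maps to IB_WIDTH_4X at IB_SPEED_EDR (4 * 25 = 100 Gb/s),
 * and MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR maps to IB_WIDTH_1X at
 * IB_SPEED_HDR (1 * 50 = 50 Gb/s).
 */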

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props, so in case
	 * of an error it will still be zeroed out.
	 * Use the native port in case of reps.
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num);
	if (err)
		goto out;
	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width     = IB_WIDTH_4X;
	props->active_speed     = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
						roce_address_table_size);
	props->max_mtu          = IB_MTU_4096;
	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len     = 1;
	props->state            = IB_PORT_DOWN;
	props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port, stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state      = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	if (gid) {
		gid_type = attr->gid_type;
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
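
/*
 * Background note: RoCE v2 encapsulates IB transport in UDP (destination
 * port 4791), and the source UDP port only needs to be stable per flow
 * so ECMP fabrics can hash on it. This returns the device's fixed
 * minimum source port (the r_roce_min_src_udp_port capability) for
 * RoCE v2 GIDs; RoCE v1 frames carry no UDP header, hence the 0.
 */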

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
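
/*
 * Decision summary for the helper above (restating the code, not adding
 * new policy):
 *
 *	link layer of port 1 | ib_virt | access method
 *	---------------------+---------+------------------------------
 *	InfiniBand           |   no    | MLX5_VPORT_ACCESS_METHOD_MAD
 *	InfiniBand           |   yes   | MLX5_VPORT_ACCESS_METHOD_HCA
 *	Ethernet             |   n/a   | MLX5_VPORT_ACCESS_METHOD_NIC
 */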

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8-byte standard atomic operations and is
	 * capable of responding in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}
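
/*
 * Worked example (the exact bit positions are illustrative assumptions):
 * if MLX5_ATOMIC_OPS_CMP_SWAP is BIT(0) and MLX5_ATOMIC_OPS_FETCH_ADD is
 * BIT(1), tmp is 0x3 and the check above requires all of:
 *
 *	(atomic_operations & 0x3) == 0x3	- both ops supported
 *	atomic_size_qp & (1 << 3)		- 8-byte operands
 *	atomic_req_8B_endianness_mode		- host-endian responses
 *
 * before IB_ATOMIC_HCA is advertised; otherwise IB_ATOMIC_NONE.
 */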

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

	get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr props = {};

	get_atomic_caps_dc(dev, &props);
	return props.atomic_cap == IB_ATOMIC_HCA;
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;

	resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (MLX5_CAP_GEN(mdev, pg))
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
	}

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(mdev, tag_matching) &&
	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}
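
/*
 * Note on the extensible-response pattern used throughout the function
 * above: each optional caps block is filled in only when field_avail()
 * shows the user buffer is long enough to hold it, and
 * resp.response_length grows accordingly, so ib_copy_to_udata() never
 * writes past what userspace allocated. An old library with a short
 * buffer simply never sees the newer trailing fields, while a new
 * library on an old kernel can detect missing fields from the returned
 * response_length.
 */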

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu %d\n", mtu);
		return -1;
	}
}
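
/*
 * Equivalent closed form (a sketch, since only these five sizes are
 * legal): the IB MTU encoding is ilog2(mtu) - 7 for power-of-two MTUs,
 * e.g. ilog2(256) - 7 = 1 (IB_MTU_256) and ilog2(4096) - 7 = 5
 * (IB_MTU_4096); the switch above just spells out the valid cases and
 * rejects everything else.
 */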

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props is zeroed by the caller; avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet, query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	int ret;

	/* Only link layer == Ethernet is valid for representors,
	 * and we always use port 1.
	 */
	ret = mlx5_query_port_roce(ibdev, port, props);
	if (ret || !props)
		return ret;

	/* We don't support GIDs */
	props->gid_tbl_len = 0;

	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, so get the PKey from the
		 * master port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass the node desc to FW so it can generate
	 * a trap 144 (local changes) notification.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)
		goto out;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		err = -EINVAL;
		goto out;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
						 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* The CM layer calls ib_modify_port() regardless of the link layer.
	 * For Ethernet ports, QKey violations and port capabilities are
	 * meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* A large system page with non-4K UAR support might limit the
	 * dynamic size.
	 */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}

static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     struct mlx5_bfreg_info *bfregi)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	/* This holds the required static allocation requested by the user */
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}
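
/*
 * Worked example with illustrative numbers (real values depend on
 * PAGE_SIZE and the uar_4k capability): if bfregs_per_sys_page is 4 and
 * userspace requests total_num_bfregs = 10, the request is aligned up
 * to 12, so num_static_sys_pages = 12 / 4 = 3; num_dyn_bfregs is then
 * added on top and num_sys_pages covers the combined static + dynamic
 * total.
 */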

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}

	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;

	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static void deallocate_uars(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;

	if (dev->lb.user_td == 2 ||
	    dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			dev->lb.enabled = true;
		}
	}

	mutex_unlock(&dev->lb.mutex);

	return err;
}

void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td--;
	if (qp)
		dev->lb.qps--;

	if (dev->lb.user_td == 1 &&
	    dev->lb.qps == 0) {
		if (dev->lb.enabled) {
			mlx5_nic_vport_update_local_lb(dev->mdev, false);
			dev->lb.enabled = false;
		}
	}

	mutex_unlock(&dev->lb.mutex);
}
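
/*
 * Rationale sketch for the thresholds above (an inference from the
 * counters, not a documented contract): the driver itself holds one
 * transport domain, so user_td reaching 2 means the first user-owned TD
 * just appeared, and qps reaching 1 means the first loopback-sensitive
 * QP exists. Either event enables local loopback; the mirrored checks
 * in mlx5_ib_disable_lb() (user_td back to 1, qps back to 0) disable it
 * again once the last user is gone.
 */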

static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
					  u16 uid)
{
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return 0;

	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return err;

	return mlx5_ib_enable_lb(dev, true, false);
}

static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
					     u16 uid)
{
	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return;

	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return;

	mlx5_ib_disable_lb(dev, true, false);
}

static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	u32 dump_fill_mkey;
	bool lib_uar_4k;

	if (!dev->ib_active)
		return -EAGAIN;

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return -EINVAL;

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
		return -EOPNOTSUPP;

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				    MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
					MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
1872 		if (err < 0)
1873 			goto out_uars;
1874 		context->devx_uid = err;
1875 	}
1876 
1877 	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1878 					     context->devx_uid);
1879 	if (err)
1880 		goto out_devx;
1881 
1882 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1883 		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1884 		if (err)
1885 			goto out_mdev;
1886 	}
1887 
1888 	INIT_LIST_HEAD(&context->db_page_list);
1889 	mutex_init(&context->db_page_mutex);
1890 
1891 	resp.tot_bfregs = req.total_num_bfregs;
1892 	resp.num_ports = dev->num_ports;
1893 
1894 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1895 		resp.response_length += sizeof(resp.cqe_version);
1896 
1897 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1898 		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1899 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1900 		resp.response_length += sizeof(resp.cmds_supp_uhw);
1901 	}
1902 
1903 	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1904 		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1905 			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1906 			resp.eth_min_inline++;
1907 		}
1908 		resp.response_length += sizeof(resp.eth_min_inline);
1909 	}
1910 
1911 	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1912 		if (mdev->clock_info)
1913 			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1914 		resp.response_length += sizeof(resp.clock_info_versions);
1915 	}
1916 
1917 	/*
1918 	 * We don't want to expose information from the PCI bar that is located
1919 	 * after 4096 bytes, so if the arch only supports larger pages, let's
1920 	 * pretend we don't support reading the HCA's core clock. This is also
1921 	 * enforced by the mmap function.
1922 	 */
1923 	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1924 		if (PAGE_SIZE <= 4096) {
1925 			resp.comp_mask |=
1926 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1927 			resp.hca_core_clock_offset =
1928 				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1929 		}
1930 		resp.response_length += sizeof(resp.hca_core_clock_offset);
1931 	}
1932 
1933 	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1934 		resp.response_length += sizeof(resp.log_uar_size);
1935 
1936 	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1937 		resp.response_length += sizeof(resp.num_uars_per_page);
1938 
1939 	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1940 		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1941 		resp.response_length += sizeof(resp.num_dyn_bfregs);
1942 	}
1943 
1944 	if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1945 		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1946 			resp.dump_fill_mkey = dump_fill_mkey;
1947 			resp.comp_mask |=
1948 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1949 		}
1950 		resp.response_length += sizeof(resp.dump_fill_mkey);
1951 	}
1952 
1953 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1954 	if (err)
1955 		goto out_mdev;
1956 
1957 	bfregi->ver = ver;
1958 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1959 	context->cqe_version = resp.cqe_version;
1960 	context->lib_caps = req.lib_caps;
1961 	print_lib_caps(dev, context->lib_caps);
1962 
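	/* Spread new user contexts across the LAG ports (round robin) for TX affinity. */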
1963 	if (dev->lag_active) {
1964 		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1965 
1966 		atomic_set(&context->tx_port_affinity,
1967 			   atomic_add_return(
1968 				   1, &dev->port[port].roce.tx_port_affinity));
1969 	}
1970 
1971 	return 0;
1972 
1973 out_mdev:
1974 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1975 out_devx:
1976 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1977 		mlx5_ib_devx_destroy(dev, context->devx_uid);
1978 
1979 out_uars:
1980 	deallocate_uars(dev, context);
1981 
1982 out_sys_pages:
1983 	kfree(bfregi->sys_pages);
1984 
1985 out_count:
1986 	kfree(bfregi->count);
1987 
1988 out_ctx:
1989 	return err;
1990 }
1991 
1992 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1993 {
1994 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1995 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1996 	struct mlx5_bfreg_info *bfregi;
1997 
1998 	/* All umem's must be destroyed before destroying the ucontext. */
1999 	mutex_lock(&ibcontext->per_mm_list_lock);
2000 	WARN_ON(!list_empty(&ibcontext->per_mm_list));
2001 	mutex_unlock(&ibcontext->per_mm_list_lock);
2002 
2003 	bfregi = &context->bfregi;
2004 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2005 
2006 	if (context->devx_uid)
2007 		mlx5_ib_devx_destroy(dev, context->devx_uid);
2008 
2009 	deallocate_uars(dev, context);
2010 	kfree(bfregi->sys_pages);
2011 	kfree(bfregi->count);
2012 }
2013 
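/*
 * Translate a UAR index into a pfn within the device BAR; with 4K UAR
 * support the firmware packs several UARs into each system page.
 */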
2014 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2015 				 int uar_idx)
2016 {
2017 	int fw_uars_per_page;
2018 
2019 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2020 
2021 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2022 }
2023 
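/*
 * The mmap offset encodes a command in the bits at and above
 * MLX5_IB_MMAP_CMD_SHIFT and a command argument in the bits below it.
 */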
2024 static int get_command(unsigned long offset)
2025 {
2026 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2027 }
2028 
2029 static int get_arg(unsigned long offset)
2030 {
2031 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2032 }
2033 
2034 static int get_index(unsigned long offset)
2035 {
2036 	return get_arg(offset);
2037 }
2038 
2039 /* The index resides in an extra byte to enable index values larger than 255 */
2040 static int get_extended_index(unsigned long offset)
2041 {
2042 	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2043 }
2044 
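/*
 * No driver-specific cleanup is needed here: all user mappings are
 * created via rdma_user_mmap_io(), so the IB core can zap them when
 * the ucontext is disassociated.
 */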
2046 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2047 {
2048 }
2049 
2050 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2051 {
2052 	switch (cmd) {
2053 	case MLX5_IB_MMAP_WC_PAGE:
2054 		return "WC";
2055 	case MLX5_IB_MMAP_REGULAR_PAGE:
2056 		return "best effort WC";
2057 	case MLX5_IB_MMAP_NC_PAGE:
2058 		return "NC";
2059 	case MLX5_IB_MMAP_DEVICE_MEM:
2060 		return "Device Memory";
2061 	default:
2062 		return NULL;
2063 	}
2064 }
2065 
2066 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2067 					struct vm_area_struct *vma,
2068 					struct mlx5_ib_ucontext *context)
2069 {
2070 	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2071 	    !(vma->vm_flags & VM_SHARED))
2072 		return -EINVAL;
2073 
2074 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2075 		return -EOPNOTSUPP;
2076 
2077 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2078 		return -EPERM;
2079 	vma->vm_flags &= ~VM_MAYWRITE;
2080 
2081 	if (!dev->mdev->clock_info)
2082 		return -EOPNOTSUPP;
2083 
2084 	return vm_insert_page(vma, vma->vm_start,
2085 			      virt_to_page(dev->mdev->clock_info));
2086 }
2087 
2088 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2089 		    struct vm_area_struct *vma,
2090 		    struct mlx5_ib_ucontext *context)
2091 {
2092 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
2093 	int err;
2094 	unsigned long idx;
2095 	phys_addr_t pfn;
2096 	pgprot_t prot;
2097 	u32 bfreg_dyn_idx = 0;
2098 	u32 uar_index;
2099 	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2100 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2101 				bfregi->num_static_sys_pages;
2102 
2103 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2104 		return -EINVAL;
2105 
2106 	if (dyn_uar)
2107 		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2108 	else
2109 		idx = get_index(vma->vm_pgoff);
2110 
2111 	if (idx >= max_valid_idx) {
2112 		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2113 			     idx, max_valid_idx);
2114 		return -EINVAL;
2115 	}
2116 
2117 	switch (cmd) {
2118 	case MLX5_IB_MMAP_WC_PAGE:
2119 	case MLX5_IB_MMAP_ALLOC_WC:
2120 /* Some architectures don't support WC memory */
2121 #if defined(CONFIG_X86)
2122 		if (!pat_enabled())
2123 			return -EPERM;
2124 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2125 			return -EPERM;
2126 #endif
2127 	/* fall through */
2128 	case MLX5_IB_MMAP_REGULAR_PAGE:
2129 		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
2130 		prot = pgprot_writecombine(vma->vm_page_prot);
2131 		break;
2132 	case MLX5_IB_MMAP_NC_PAGE:
2133 		prot = pgprot_noncached(vma->vm_page_prot);
2134 		break;
2135 	default:
2136 		return -EINVAL;
2137 	}
2138 
2139 	if (dyn_uar) {
2140 		int uars_per_page;
2141 
2142 		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2143 		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2144 		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2145 			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2146 				     bfreg_dyn_idx, bfregi->total_num_bfregs);
2147 			return -EINVAL;
2148 		}
2149 
2150 		mutex_lock(&bfregi->lock);
2151 		/* Fail if the UAR is already allocated; the first bfreg
2152 		 * index of each page holds its count.
2153 		 */
2154 		if (bfregi->count[bfreg_dyn_idx]) {
2155 			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2156 			mutex_unlock(&bfregi->lock);
2157 			return -EINVAL;
2158 		}
2159 
2160 		bfregi->count[bfreg_dyn_idx]++;
2161 		mutex_unlock(&bfregi->lock);
2162 
2163 		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2164 		if (err) {
2165 			mlx5_ib_warn(dev, "UAR alloc failed\n");
2166 			goto free_bfreg;
2167 		}
2168 	} else {
2169 		uar_index = bfregi->sys_pages[idx];
2170 	}
2171 
2172 	pfn = uar_index2pfn(dev, uar_index);
2173 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2174 
2175 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2176 				prot);
2177 	if (err) {
2178 		mlx5_ib_err(dev,
2179 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2180 			    err, mmap_cmd2str(cmd));
2181 		goto err;
2182 	}
2183 
2184 	if (dyn_uar)
2185 		bfregi->sys_pages[idx] = uar_index;
2186 	return 0;
2187 
2188 err:
2189 	if (!dyn_uar)
2190 		return err;
2191 
2192 	mlx5_cmd_free_uar(dev->mdev, uar_index);
2193 
2194 free_bfreg:
2195 	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2196 
2197 	return err;
2198 }
2199 
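/*
 * Map MEMIC device memory into user space; the context's dm_pages
 * bitmap guards against mapping pages this context does not own.
 */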
2200 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2201 {
2202 	struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2203 	struct mlx5_ib_dev *dev = to_mdev(context->device);
2204 	u16 page_idx = get_extended_index(vma->vm_pgoff);
2205 	size_t map_size = vma->vm_end - vma->vm_start;
2206 	u32 npages = map_size >> PAGE_SHIFT;
2207 	phys_addr_t pfn;
2208 
2209 	if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2210 	    page_idx + npages)
2211 		return -EINVAL;
2212 
2213 	pfn = ((dev->mdev->bar_addr +
2214 	      MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2215 	      PAGE_SHIFT) +
2216 	      page_idx;
2217 	return rdma_user_mmap_io(context, vma, pfn, map_size,
2218 				 pgprot_writecombine(vma->vm_page_prot));
2219 }
2220 
2221 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2222 {
2223 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2224 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2225 	unsigned long command;
2226 	phys_addr_t pfn;
2227 
2228 	command = get_command(vma->vm_pgoff);
2229 	switch (command) {
2230 	case MLX5_IB_MMAP_WC_PAGE:
2231 	case MLX5_IB_MMAP_NC_PAGE:
2232 	case MLX5_IB_MMAP_REGULAR_PAGE:
2233 	case MLX5_IB_MMAP_ALLOC_WC:
2234 		return uar_mmap(dev, command, vma, context);
2235 
2236 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2237 		return -ENOSYS;
2238 
2239 	case MLX5_IB_MMAP_CORE_CLOCK:
2240 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2241 			return -EINVAL;
2242 
2243 		if (vma->vm_flags & VM_WRITE)
2244 			return -EPERM;
2245 		vma->vm_flags &= ~VM_MAYWRITE;
2246 
2247 		/* Don't expose information to user space that it shouldn't have */
2248 		if (PAGE_SIZE > 4096)
2249 			return -EOPNOTSUPP;
2250 
2251 		pfn = (dev->mdev->iseg_base +
2252 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2253 			PAGE_SHIFT;
2254 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2255 					 PAGE_SIZE,
2256 					 pgprot_noncached(vma->vm_page_prot));
2257 	case MLX5_IB_MMAP_CLOCK_INFO:
2258 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2259 
2260 	case MLX5_IB_MMAP_DEVICE_MEM:
2261 		return dm_mmap(ibcontext, vma);
2262 
2263 	default:
2264 		return -EINVAL;
2265 	}
2266 
2267 	return 0;
2268 }
2269 
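/* Reject DM types that the device caps or the caller's privileges don't allow. */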
2270 static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
2271 					u32 type)
2272 {
2273 	switch (type) {
2274 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2275 		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
2276 			return -EOPNOTSUPP;
2277 		break;
2278 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2279 		if (!capable(CAP_SYS_RAWIO) ||
2280 		    !capable(CAP_NET_RAW))
2281 			return -EPERM;
2282 
2283 		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
2284 		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
2285 			return -EOPNOTSUPP;
2286 		break;
2287 	}
2288 
2289 	return 0;
2290 }
2291 
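/*
 * Allocate MEMIC device memory: round the length up to the device base
 * size, return the page index and in-page offset to user space, and
 * mark the pages as owned in the context's dm_pages bitmap.
 */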
2292 static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
2293 				 struct mlx5_ib_dm *dm,
2294 				 struct ib_dm_alloc_attr *attr,
2295 				 struct uverbs_attr_bundle *attrs)
2296 {
2297 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2298 	u64 start_offset;
2299 	u32 page_idx;
2300 	int err;
2301 
2302 	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2303 
2304 	err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
2305 				   dm->size, attr->alignment);
2306 	if (err)
2307 		return err;
2308 
2309 	page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
2310 		    MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
2311 		    PAGE_SHIFT;
2312 
2313 	err = uverbs_copy_to(attrs,
2314 			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2315 			     &page_idx, sizeof(page_idx));
2316 	if (err)
2317 		goto err_dealloc;
2318 
2319 	start_offset = dm->dev_addr & ~PAGE_MASK;
2320 	err = uverbs_copy_to(attrs,
2321 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2322 			     &start_offset, sizeof(start_offset));
2323 	if (err)
2324 		goto err_dealloc;
2325 
2326 	bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
2327 		   DIV_ROUND_UP(dm->size, PAGE_SIZE));
2328 
2329 	return 0;
2330 
2331 err_dealloc:
2332 	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2333 
2334 	return err;
2335 }
2336 
2337 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
2338 				  struct mlx5_ib_dm *dm,
2339 				  struct ib_dm_alloc_attr *attr,
2340 				  struct uverbs_attr_bundle *attrs,
2341 				  int type)
2342 {
2343 	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2344 	u64 act_size;
2345 	int err;
2346 
2347 	/* Allocation size must be a multiple of the basic block size
2348 	 * and a power of 2.
2349 	 */
2350 	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
2351 	act_size = roundup_pow_of_two(act_size);
2352 
2353 	dm->size = act_size;
2354 	err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
2355 				    to_mucontext(ctx)->devx_uid, &dm->dev_addr,
2356 				    &dm->icm_dm.obj_id);
2357 	if (err)
2358 		return err;
2359 
2360 	err = uverbs_copy_to(attrs,
2361 			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2362 			     &dm->dev_addr, sizeof(dm->dev_addr));
2363 	if (err)
2364 		mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
2365 					to_mucontext(ctx)->devx_uid,
2366 					dm->dev_addr, dm->icm_dm.obj_id);
2367 
2368 	return err;
2369 }
2370 
2371 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2372 			       struct ib_ucontext *context,
2373 			       struct ib_dm_alloc_attr *attr,
2374 			       struct uverbs_attr_bundle *attrs)
2375 {
2376 	struct mlx5_ib_dm *dm;
2377 	enum mlx5_ib_uapi_dm_type type;
2378 	int err;
2379 
2380 	err = uverbs_get_const_default(&type, attrs,
2381 				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
2382 				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
2383 	if (err)
2384 		return ERR_PTR(err);
2385 
2386 	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
2387 		    type, attr->length, attr->alignment);
2388 
2389 	err = check_dm_type_support(to_mdev(ibdev), type);
2390 	if (err)
2391 		return ERR_PTR(err);
2392 
2393 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2394 	if (!dm)
2395 		return ERR_PTR(-ENOMEM);
2396 
2397 	dm->type = type;
2398 
2399 	switch (type) {
2400 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2401 		err = handle_alloc_dm_memic(context, dm,
2402 					    attr,
2403 					    attrs);
2404 		break;
2405 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2406 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2407 		err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
2408 		break;
2409 	default:
2410 		err = -EOPNOTSUPP;
2411 	}
2412 
2413 	if (err)
2414 		goto err_free;
2415 
2416 	return &dm->ibdm;
2417 
2418 err_free:
2419 	kfree(dm);
2420 	return ERR_PTR(err);
2421 }
2422 
2423 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
2424 {
2425 	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
2426 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2427 	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
2428 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
2429 	u32 page_idx;
2430 	int ret;
2431 
2432 	switch (dm->type) {
2433 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2434 		ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2435 		if (ret)
2436 			return ret;
2437 
2438 		page_idx = (dm->dev_addr -
2439 			    pci_resource_start(dm_db->dev->pdev, 0) -
2440 			    MLX5_CAP64_DEV_MEM(dm_db->dev,
2441 					       memic_bar_start_addr)) >>
2442 			   PAGE_SHIFT;
2443 		bitmap_clear(ctx->dm_pages, page_idx,
2444 			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
2445 		break;
2446 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2447 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2448 		ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
2449 					      ctx->devx_uid, dm->dev_addr,
2450 					      dm->icm_dm.obj_id);
2451 		if (ret)
2452 			return ret;
2453 		break;
2454 	default:
2455 		return -EOPNOTSUPP;
2456 	}
2457 
2458 	kfree(dm);
2459 
2460 	return 0;
2461 }
2462 
2463 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2464 {
2465 	struct mlx5_ib_pd *pd = to_mpd(ibpd);
2466 	struct ib_device *ibdev = ibpd->device;
2467 	struct mlx5_ib_alloc_pd_resp resp;
2468 	int err;
2469 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2470 	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
2471 	u16 uid = 0;
2472 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2473 		udata, struct mlx5_ib_ucontext, ibucontext);
2474 
2475 	uid = context ? context->devx_uid : 0;
2476 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2477 	MLX5_SET(alloc_pd_in, in, uid, uid);
2478 	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2479 			    out, sizeof(out));
2480 	if (err)
2481 		return err;
2482 
2483 	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2484 	pd->uid = uid;
2485 	if (udata) {
2486 		resp.pdn = pd->pdn;
2487 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2488 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2489 			return -EFAULT;
2490 		}
2491 	}
2492 
2493 	return 0;
2494 }
2495 
2496 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2497 {
2498 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2499 	struct mlx5_ib_pd *mpd = to_mpd(pd);
2500 
2501 	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2502 }
2503 
2504 enum {
2505 	MATCH_CRITERIA_ENABLE_OUTER_BIT,
2506 	MATCH_CRITERIA_ENABLE_MISC_BIT,
2507 	MATCH_CRITERIA_ENABLE_INNER_BIT,
2508 	MATCH_CRITERIA_ENABLE_MISC2_BIT
2509 };
2510 
2511 #define HEADER_IS_ZERO(match_criteria, headers)			           \
2512 	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2513 		    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
2514 
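/*
 * Build the match_criteria_enable bitmask: one bit per header section
 * (outer, misc, inner, misc2) that carries a non-zero match mask.
 */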
2515 static u8 get_match_criteria_enable(u32 *match_criteria)
2516 {
2517 	u8 match_criteria_enable;
2518 
2519 	match_criteria_enable =
2520 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2521 		MATCH_CRITERIA_ENABLE_OUTER_BIT;
2522 	match_criteria_enable |=
2523 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2524 		MATCH_CRITERIA_ENABLE_MISC_BIT;
2525 	match_criteria_enable |=
2526 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2527 		MATCH_CRITERIA_ENABLE_INNER_BIT;
2528 	match_criteria_enable |=
2529 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2530 		MATCH_CRITERIA_ENABLE_MISC2_BIT;
2531 
2532 	return match_criteria_enable;
2533 }
2534 
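/*
 * Set the ip_protocol match field, refusing to override a conflicting
 * protocol requested by an earlier spec.
 */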
2535 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2536 {
2537 	u8 entry_mask;
2538 	u8 entry_val;
2539 	int err = 0;
2540 
2541 	if (!mask)
2542 		goto out;
2543 
2544 	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
2545 			      ip_protocol);
2546 	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
2547 			     ip_protocol);
2548 	if (!entry_mask) {
2549 		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2550 		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2551 		goto out;
2552 	}
2553 	/* Don't override existing ip protocol */
2554 	if (mask != entry_mask || val != entry_val)
2555 		err = -EINVAL;
2556 out:
2557 	return err;
2558 }
2559 
2560 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2561 			   bool inner)
2562 {
2563 	if (inner) {
2564 		MLX5_SET(fte_match_set_misc,
2565 			 misc_c, inner_ipv6_flow_label, mask);
2566 		MLX5_SET(fte_match_set_misc,
2567 			 misc_v, inner_ipv6_flow_label, val);
2568 	} else {
2569 		MLX5_SET(fte_match_set_misc,
2570 			 misc_c, outer_ipv6_flow_label, mask);
2571 		MLX5_SET(fte_match_set_misc,
2572 			 misc_v, outer_ipv6_flow_label, val);
2573 	}
2574 }
2575 
2576 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2577 {
2578 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2579 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2580 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2581 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2582 }
2583 
2584 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2585 {
2586 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2587 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2588 		return -EOPNOTSUPP;
2589 
2590 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2591 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2592 		return -EOPNOTSUPP;
2593 
2594 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2595 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2596 		return -EOPNOTSUPP;
2597 
2598 	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2599 	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2600 		return -EOPNOTSUPP;
2601 
2602 	return 0;
2603 }
2604 
2605 #define LAST_ETH_FIELD vlan_tag
2606 #define LAST_IB_FIELD sl
2607 #define LAST_IPV4_FIELD tos
2608 #define LAST_IPV6_FIELD traffic_class
2609 #define LAST_TCP_UDP_FIELD src_port
2610 #define LAST_TUNNEL_FIELD tunnel_id
2611 #define LAST_FLOW_TAG_FIELD tag_id
2612 #define LAST_DROP_FIELD size
2613 #define LAST_COUNTERS_FIELD counters
2614 
2615 /* Field is the last supported field */
2616 #define FIELDS_NOT_SUPPORTED(filter, field)\
2617 	memchr_inv((void *)&filter.field  +\
2618 		   sizeof(filter.field), 0,\
2619 		   sizeof(filter) -\
2620 		   offsetof(typeof(filter), field) -\
2621 		   sizeof(filter.field))
2622 
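/*
 * Translate an IB flow action into mlx5_flow_act flags; a duplicate
 * action of the same kind within one flow is rejected.
 */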
2623 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2624 			   bool is_egress,
2625 			   struct mlx5_flow_act *action)
2626 {
2628 	switch (maction->ib_action.type) {
2629 	case IB_FLOW_ACTION_ESP:
2630 		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2631 				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2632 			return -EINVAL;
2633 		/* Currently only AES_GCM keymat is supported by the driver */
2634 		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2635 		action->action |= is_egress ?
2636 			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2637 			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2638 		return 0;
2639 	case IB_FLOW_ACTION_UNSPECIFIED:
2640 		if (maction->flow_action_raw.sub_type ==
2641 		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2642 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2643 				return -EINVAL;
2644 			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2645 			action->modify_id = maction->flow_action_raw.action_id;
2646 			return 0;
2647 		}
2648 		if (maction->flow_action_raw.sub_type ==
2649 		    MLX5_IB_FLOW_ACTION_DECAP) {
2650 			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2651 				return -EINVAL;
2652 			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2653 			return 0;
2654 		}
2655 		if (maction->flow_action_raw.sub_type ==
2656 		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2657 			if (action->action &
2658 			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2659 				return -EINVAL;
2660 			action->action |=
2661 				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2662 			action->reformat_id =
2663 				maction->flow_action_raw.action_id;
2664 			return 0;
2665 		}
2666 		/* fall through */
2667 	default:
2668 		return -EOPNOTSUPP;
2669 	}
2670 }
2671 
2672 static int parse_flow_attr(struct mlx5_core_dev *mdev,
2673 			   struct mlx5_flow_spec *spec,
2674 			   const union ib_flow_spec *ib_spec,
2675 			   const struct ib_flow_attr *flow_attr,
2676 			   struct mlx5_flow_act *action, u32 prev_type)
2677 {
2678 	struct mlx5_flow_context *flow_context = &spec->flow_context;
2679 	u32 *match_c = spec->match_criteria;
2680 	u32 *match_v = spec->match_value;
2681 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2682 					   misc_parameters);
2683 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2684 					   misc_parameters);
2685 	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2686 					    misc_parameters_2);
2687 	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2688 					    misc_parameters_2);
2689 	void *headers_c;
2690 	void *headers_v;
2691 	int match_ipv;
2692 	int ret;
2693 
2694 	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2695 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2696 					 inner_headers);
2697 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2698 					 inner_headers);
2699 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2700 					ft_field_support.inner_ip_version);
2701 	} else {
2702 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2703 					 outer_headers);
2704 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2705 					 outer_headers);
2706 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2707 					ft_field_support.outer_ip_version);
2708 	}
2709 
2710 	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2711 	case IB_FLOW_SPEC_ETH:
2712 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2713 			return -EOPNOTSUPP;
2714 
2715 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2716 					     dmac_47_16),
2717 				ib_spec->eth.mask.dst_mac);
2718 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2719 					     dmac_47_16),
2720 				ib_spec->eth.val.dst_mac);
2721 
2722 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2723 					     smac_47_16),
2724 				ib_spec->eth.mask.src_mac);
2725 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2726 					     smac_47_16),
2727 				ib_spec->eth.val.src_mac);
2728 
2729 		if (ib_spec->eth.mask.vlan_tag) {
2730 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2731 				 cvlan_tag, 1);
2732 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2733 				 cvlan_tag, 1);
2734 
2735 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2736 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2737 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2738 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2739 
2740 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2741 				 first_cfi,
2742 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2743 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2744 				 first_cfi,
2745 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2746 
2747 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2748 				 first_prio,
2749 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2750 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2751 				 first_prio,
2752 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2753 		}
2754 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2755 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
2756 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2757 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
2758 		break;
2759 	case IB_FLOW_SPEC_IPV4:
2760 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2761 			return -EOPNOTSUPP;
2762 
2763 		if (match_ipv) {
2764 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2765 				 ip_version, 0xf);
2766 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2767 				 ip_version, MLX5_FS_IPV4_VERSION);
2768 		} else {
2769 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2770 				 ethertype, 0xffff);
2771 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2772 				 ethertype, ETH_P_IP);
2773 		}
2774 
2775 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2776 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2777 		       &ib_spec->ipv4.mask.src_ip,
2778 		       sizeof(ib_spec->ipv4.mask.src_ip));
2779 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2780 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2781 		       &ib_spec->ipv4.val.src_ip,
2782 		       sizeof(ib_spec->ipv4.val.src_ip));
2783 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2784 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2785 		       &ib_spec->ipv4.mask.dst_ip,
2786 		       sizeof(ib_spec->ipv4.mask.dst_ip));
2787 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2788 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2789 		       &ib_spec->ipv4.val.dst_ip,
2790 		       sizeof(ib_spec->ipv4.val.dst_ip));
2791 
2792 		set_tos(headers_c, headers_v,
2793 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2794 
2795 		if (set_proto(headers_c, headers_v,
2796 			      ib_spec->ipv4.mask.proto,
2797 			      ib_spec->ipv4.val.proto))
2798 			return -EINVAL;
2799 		break;
2800 	case IB_FLOW_SPEC_IPV6:
2801 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2802 			return -EOPNOTSUPP;
2803 
2804 		if (match_ipv) {
2805 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2806 				 ip_version, 0xf);
2807 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2808 				 ip_version, MLX5_FS_IPV6_VERSION);
2809 		} else {
2810 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2811 				 ethertype, 0xffff);
2812 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2813 				 ethertype, ETH_P_IPV6);
2814 		}
2815 
2816 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2817 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2818 		       &ib_spec->ipv6.mask.src_ip,
2819 		       sizeof(ib_spec->ipv6.mask.src_ip));
2820 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2821 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2822 		       &ib_spec->ipv6.val.src_ip,
2823 		       sizeof(ib_spec->ipv6.val.src_ip));
2824 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2825 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2826 		       &ib_spec->ipv6.mask.dst_ip,
2827 		       sizeof(ib_spec->ipv6.mask.dst_ip));
2828 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2829 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2830 		       &ib_spec->ipv6.val.dst_ip,
2831 		       sizeof(ib_spec->ipv6.val.dst_ip));
2832 
2833 		set_tos(headers_c, headers_v,
2834 			ib_spec->ipv6.mask.traffic_class,
2835 			ib_spec->ipv6.val.traffic_class);
2836 
2837 		if (set_proto(headers_c, headers_v,
2838 			      ib_spec->ipv6.mask.next_hdr,
2839 			      ib_spec->ipv6.val.next_hdr))
2840 			return -EINVAL;
2841 
2842 		set_flow_label(misc_params_c, misc_params_v,
2843 			       ntohl(ib_spec->ipv6.mask.flow_label),
2844 			       ntohl(ib_spec->ipv6.val.flow_label),
2845 			       ib_spec->type & IB_FLOW_SPEC_INNER);
2846 		break;
2847 	case IB_FLOW_SPEC_ESP:
2848 		if (ib_spec->esp.mask.seq)
2849 			return -EOPNOTSUPP;
2850 
2851 		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2852 			 ntohl(ib_spec->esp.mask.spi));
2853 		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2854 			 ntohl(ib_spec->esp.val.spi));
2855 		break;
2856 	case IB_FLOW_SPEC_TCP:
2857 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2858 					 LAST_TCP_UDP_FIELD))
2859 			return -EOPNOTSUPP;
2860 
2861 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
2862 			return -EINVAL;
2863 
2864 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2865 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2866 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2867 			 ntohs(ib_spec->tcp_udp.val.src_port));
2868 
2869 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2870 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2871 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2872 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2873 		break;
2874 	case IB_FLOW_SPEC_UDP:
2875 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2876 					 LAST_TCP_UDP_FIELD))
2877 			return -EOPNOTSUPP;
2878 
2879 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
2880 			return -EINVAL;
2881 
2882 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2883 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2884 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2885 			 ntohs(ib_spec->tcp_udp.val.src_port));
2886 
2887 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2888 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2889 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2890 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2891 		break;
2892 	case IB_FLOW_SPEC_GRE:
2893 		if (ib_spec->gre.mask.c_ks_res0_ver)
2894 			return -EOPNOTSUPP;
2895 
2896 		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
2897 			return -EINVAL;
2903 
2904 		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2905 			 ntohs(ib_spec->gre.mask.protocol));
2906 		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2907 			 ntohs(ib_spec->gre.val.protocol));
2908 
2909 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2910 				    gre_key.nvgre.hi),
2911 		       &ib_spec->gre.mask.key,
2912 		       sizeof(ib_spec->gre.mask.key));
2913 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2914 				    gre_key.nvgre.hi),
2915 		       &ib_spec->gre.val.key,
2916 		       sizeof(ib_spec->gre.val.key));
2917 		break;
2918 	case IB_FLOW_SPEC_MPLS:
2919 		switch (prev_type) {
2920 		case IB_FLOW_SPEC_UDP:
2921 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2922 						   ft_field_support.outer_first_mpls_over_udp),
2923 						   &ib_spec->mpls.mask.tag))
2924 				return -EOPNOTSUPP;
2925 
2926 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2927 					    outer_first_mpls_over_udp),
2928 			       &ib_spec->mpls.val.tag,
2929 			       sizeof(ib_spec->mpls.val.tag));
2930 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2931 					    outer_first_mpls_over_udp),
2932 			       &ib_spec->mpls.mask.tag,
2933 			       sizeof(ib_spec->mpls.mask.tag));
2934 			break;
2935 		case IB_FLOW_SPEC_GRE:
2936 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2937 						   ft_field_support.outer_first_mpls_over_gre),
2938 						   &ib_spec->mpls.mask.tag))
2939 				return -EOPNOTSUPP;
2940 
2941 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2942 					    outer_first_mpls_over_gre),
2943 			       &ib_spec->mpls.val.tag,
2944 			       sizeof(ib_spec->mpls.val.tag));
2945 			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2946 					    outer_first_mpls_over_gre),
2947 			       &ib_spec->mpls.mask.tag,
2948 			       sizeof(ib_spec->mpls.mask.tag));
2949 			break;
2950 		default:
2951 			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2952 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2953 							   ft_field_support.inner_first_mpls),
2954 							   &ib_spec->mpls.mask.tag))
2955 					return -EOPNOTSUPP;
2956 
2957 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2958 						    inner_first_mpls),
2959 				       &ib_spec->mpls.val.tag,
2960 				       sizeof(ib_spec->mpls.val.tag));
2961 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2962 						    inner_first_mpls),
2963 				       &ib_spec->mpls.mask.tag,
2964 				       sizeof(ib_spec->mpls.mask.tag));
2965 			} else {
2966 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2967 							   ft_field_support.outer_first_mpls),
2968 							   &ib_spec->mpls.mask.tag))
2969 					return -EOPNOTSUPP;
2970 
2971 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2972 						    outer_first_mpls),
2973 				       &ib_spec->mpls.val.tag,
2974 				       sizeof(ib_spec->mpls.val.tag));
2975 				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2976 						    outer_first_mpls),
2977 				       &ib_spec->mpls.mask.tag,
2978 				       sizeof(ib_spec->mpls.mask.tag));
2979 			}
2980 		}
2981 		break;
2982 	case IB_FLOW_SPEC_VXLAN_TUNNEL:
2983 		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2984 					 LAST_TUNNEL_FIELD))
2985 			return -EOPNOTSUPP;
2986 
2987 		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2988 			 ntohl(ib_spec->tunnel.mask.tunnel_id));
2989 		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2990 			 ntohl(ib_spec->tunnel.val.tunnel_id));
2991 		break;
2992 	case IB_FLOW_SPEC_ACTION_TAG:
2993 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2994 					 LAST_FLOW_TAG_FIELD))
2995 			return -EOPNOTSUPP;
2996 		if (ib_spec->flow_tag.tag_id >= BIT(24))
2997 			return -EINVAL;
2998 
2999 		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
3000 		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
3001 		break;
3002 	case IB_FLOW_SPEC_ACTION_DROP:
3003 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
3004 					 LAST_DROP_FIELD))
3005 			return -EOPNOTSUPP;
3006 		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3007 		break;
3008 	case IB_FLOW_SPEC_ACTION_HANDLE:
3009 		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
3010 			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
3011 		if (ret)
3012 			return ret;
3013 		break;
3014 	case IB_FLOW_SPEC_ACTION_COUNT:
3015 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
3016 					 LAST_COUNTERS_FIELD))
3017 			return -EOPNOTSUPP;
3018 
3019 		/* for now, only one counters spec per flow is supported */
3020 		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3021 			return -EINVAL;
3022 
3023 		action->counters = ib_spec->flow_count.counters;
3024 		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3025 		break;
3026 	default:
3027 		return -EINVAL;
3028 	}
3029 
3030 	return 0;
3031 }
3032 
3033 /* If a flow rule could match both multicast and unicast packets,
3034  * it won't be placed in the multicast flow steering table, so such
3035  * a rule could steal other multicast packets.
3036  */
3037 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
3038 {
3039 	union ib_flow_spec *flow_spec;
3040 
3041 	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
3042 	    ib_attr->num_of_specs < 1)
3043 		return false;
3044 
3045 	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
3046 	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
3047 		struct ib_flow_spec_ipv4 *ipv4_spec;
3048 
3049 		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
3050 		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
3051 			return true;
3052 
3053 		return false;
3054 	}
3055 
3056 	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
3057 		struct ib_flow_spec_eth *eth_spec;
3058 
3059 		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
3060 		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
3061 		       is_multicast_ether_addr(eth_spec->val.dst_mac);
3062 	}
3063 
3064 	return false;
3065 }
3066 
3067 enum valid_spec {
3068 	VALID_SPEC_INVALID,
3069 	VALID_SPEC_VALID,
3070 	VALID_SPEC_NA,
3071 };
3072 
3073 static enum valid_spec
3074 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
3075 		     const struct mlx5_flow_spec *spec,
3076 		     const struct mlx5_flow_act *flow_act,
3077 		     bool egress)
3078 {
3079 	const u32 *match_c = spec->match_criteria;
3080 	bool is_crypto =
3081 		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
3082 				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
3083 	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
3084 	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
3085 
3086 	/*
3087 	 * Currently only crypto rules are supported in egress; once regular
3088 	 * egress rules gain support, non-crypto specs keep returning VALID_SPEC_NA.
3089 	 */
3090 	if (!is_crypto)
3091 		return VALID_SPEC_NA;
3092 
3093 	return is_crypto && is_ipsec &&
3094 		(!egress || (!is_drop &&
3095 			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
3096 		VALID_SPEC_VALID : VALID_SPEC_INVALID;
3097 }
3098 
3099 static bool is_valid_spec(struct mlx5_core_dev *mdev,
3100 			  const struct mlx5_flow_spec *spec,
3101 			  const struct mlx5_flow_act *flow_act,
3102 			  bool egress)
3103 {
3104 	/* We currently only support IPsec egress flows */
3105 	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
3106 }
3107 
3108 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
3109 			       const struct ib_flow_attr *flow_attr,
3110 			       bool check_inner)
3111 {
3112 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
3113 	int match_ipv = check_inner ?
3114 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3115 					ft_field_support.inner_ip_version) :
3116 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3117 					ft_field_support.outer_ip_version);
3118 	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
3119 	bool ipv4_spec_valid, ipv6_spec_valid;
3120 	unsigned int ip_spec_type = 0;
3121 	bool has_ethertype = false;
3122 	unsigned int spec_index;
3123 	bool mask_valid = true;
3124 	u16 eth_type = 0;
3125 	bool type_valid;
3126 
3127 	/* Validate that ethertype is correct */
3128 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3129 		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
3130 		    ib_spec->eth.mask.ether_type) {
3131 			mask_valid = (ib_spec->eth.mask.ether_type ==
3132 				      htons(0xffff));
3133 			has_ethertype = true;
3134 			eth_type = ntohs(ib_spec->eth.val.ether_type);
3135 		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
3136 			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
3137 			ip_spec_type = ib_spec->type;
3138 		}
3139 		ib_spec = (void *)ib_spec + ib_spec->size;
3140 	}
3141 
3142 	type_valid = (!has_ethertype) || (!ip_spec_type);
3143 	if (!type_valid && mask_valid) {
3144 		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
3145 			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
3146 		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
3147 			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
3148 
3149 		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
3150 			     (((eth_type == ETH_P_MPLS_UC) ||
3151 			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
3152 	}
3153 
3154 	return type_valid;
3155 }
3156 
3157 static bool is_valid_attr(struct mlx5_core_dev *mdev,
3158 			  const struct ib_flow_attr *flow_attr)
3159 {
3160 	return is_valid_ethertype(mdev, flow_attr, false) &&
3161 	       is_valid_ethertype(mdev, flow_attr, true);
3162 }
3163 
3164 static void put_flow_table(struct mlx5_ib_dev *dev,
3165 			   struct mlx5_ib_flow_prio *prio, bool ft_added)
3166 {
3167 	prio->refcount -= !!ft_added;
3168 	if (!prio->refcount) {
3169 		mlx5_destroy_flow_table(prio->flow_table);
3170 		prio->flow_table = NULL;
3171 	}
3172 }
3173 
3174 static void counters_clear_description(struct ib_counters *counters)
3175 {
3176 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3177 
3178 	mutex_lock(&mcounters->mcntrs_mutex);
3179 	kfree(mcounters->counters_data);
3180 	mcounters->counters_data = NULL;
3181 	mcounters->cntrs_max_index = 0;
3182 	mutex_unlock(&mcounters->mcntrs_mutex);
3183 }
3184 
3185 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
3186 {
3187 	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
3188 							  struct mlx5_ib_flow_handler,
3189 							  ibflow);
3190 	struct mlx5_ib_flow_handler *iter, *tmp;
3191 	struct mlx5_ib_dev *dev = handler->dev;
3192 
3193 	mutex_lock(&dev->flow_db->lock);
3194 
3195 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
3196 		mlx5_del_flow_rules(iter->rule);
3197 		put_flow_table(dev, iter->prio, true);
3198 		list_del(&iter->list);
3199 		kfree(iter);
3200 	}
3201 
3202 	mlx5_del_flow_rules(handler->rule);
3203 	put_flow_table(dev, handler->prio, true);
3204 	if (handler->ibcounters &&
3205 	    atomic_read(&handler->ibcounters->usecnt) == 1)
3206 		counters_clear_description(handler->ibcounters);
3207 
3208 	mutex_unlock(&dev->flow_db->lock);
3209 	if (handler->flow_matcher)
3210 		atomic_dec(&handler->flow_matcher->usecnt);
3211 	kfree(handler);
3212 
3213 	return 0;
3214 }
3215 
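/*
 * Each user priority maps onto two core priorities so that "don't trap"
 * rules are evaluated ahead of normal rules of the same priority.
 */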
3216 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3217 {
3218 	priority *= 2;
3219 	if (!dont_trap)
3220 		priority++;
3221 	return priority;
3222 }
3223 
3224 enum flow_table_type {
3225 	MLX5_IB_FT_RX,
3226 	MLX5_IB_FT_TX
3227 };
3228 
3229 #define MLX5_FS_MAX_TYPES	 6
3230 #define MLX5_FS_MAX_ENTRIES	 BIT(16)
3231 
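/*
 * Create an auto-grouped flow table for a priority slot and reset its
 * refcount; callers share the table through the returned prio.
 */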
3232 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3233 					   struct mlx5_ib_flow_prio *prio,
3234 					   int priority,
3235 					   int num_entries, int num_groups,
3236 					   u32 flags)
3237 {
3238 	struct mlx5_flow_table *ft;
3239 
3240 	ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3241 						 num_entries,
3242 						 num_groups,
3243 						 0, flags);
3244 	if (IS_ERR(ft))
3245 		return ERR_CAST(ft);
3246 
3247 	prio->flow_table = ft;
3248 	prio->refcount = 0;
3249 	return prio;
3250 }
3251 
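/*
 * Select (and lazily create) the flow table matching the flow
 * attribute: normal rules use the bypass/egress namespaces, while
 * leftovers and sniffer rules use dedicated namespaces and priorities.
 */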
3252 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3253 						struct ib_flow_attr *flow_attr,
3254 						enum flow_table_type ft_type)
3255 {
3256 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3257 	struct mlx5_flow_namespace *ns = NULL;
3258 	struct mlx5_ib_flow_prio *prio;
3259 	struct mlx5_flow_table *ft;
3260 	int max_table_size;
3261 	int num_entries;
3262 	int num_groups;
3263 	bool esw_encap;
3264 	u32 flags = 0;
3265 	int priority;
3266 
3267 	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3268 						       log_max_ft_size));
3269 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3270 		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
3271 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3272 		enum mlx5_flow_namespace_type fn_type;
3273 
3274 		if (flow_is_multicast_only(flow_attr) &&
3275 		    !dont_trap)
3276 			priority = MLX5_IB_FLOW_MCAST_PRIO;
3277 		else
3278 			priority = ib_prio_to_core_prio(flow_attr->priority,
3279 							dont_trap);
3280 		if (ft_type == MLX5_IB_FT_RX) {
3281 			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3282 			prio = &dev->flow_db->prios[priority];
3283 			if (!dev->is_rep && !esw_encap &&
3284 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3285 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3286 			if (!dev->is_rep && !esw_encap &&
3287 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3288 					reformat_l3_tunnel_to_l2))
3289 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3290 		} else {
3291 			max_table_size =
3292 				BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3293 							      log_max_ft_size));
3294 			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3295 			prio = &dev->flow_db->egress_prios[priority];
3296 			if (!dev->is_rep && !esw_encap &&
3297 			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3298 				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3299 		}
3300 		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3301 		num_entries = MLX5_FS_MAX_ENTRIES;
3302 		num_groups = MLX5_FS_MAX_TYPES;
3303 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3304 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3305 		ns = mlx5_get_flow_namespace(dev->mdev,
3306 					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
3307 		build_leftovers_ft_param(&priority,
3308 					 &num_entries,
3309 					 &num_groups);
3310 		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3311 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3312 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3313 					allow_sniffer_and_nic_rx_shared_tir))
3314 			return ERR_PTR(-ENOTSUPP);
3315 
3316 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3317 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3318 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3319 
3320 		prio = &dev->flow_db->sniffer[ft_type];
3321 		priority = 0;
3322 		num_entries = 1;
3323 		num_groups = 1;
3324 	}
3325 
3326 	if (!ns)
3327 		return ERR_PTR(-ENOTSUPP);
3328 
3329 	max_table_size = min_t(int, num_entries, max_table_size);
3330 
3331 	ft = prio->flow_table;
3332 	if (!ft)
3333 		return _get_prio(ns, prio, priority, max_table_size, num_groups,
3334 				 flags);
3335 
3336 	return prio;
3337 }
3338 
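/*
 * When an underlay QPN is given, also match on the BTH destination QP
 * so the rule only applies to traffic of that underlay QP.
 */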
3339 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3340 			    struct mlx5_flow_spec *spec,
3341 			    u32 underlay_qpn)
3342 {
3343 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3344 					   spec->match_criteria,
3345 					   misc_parameters);
3346 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3347 					   misc_parameters);
3348 
3349 	if (underlay_qpn &&
3350 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3351 				      ft_field_support.bth_dst_qp)) {
3352 		MLX5_SET(fte_match_set_misc,
3353 			 misc_params_v, bth_dst_qp, underlay_qpn);
3354 		MLX5_SET(fte_match_set_misc,
3355 			 misc_params_c, bth_dst_qp, 0xffffff);
3356 	}
3357 }
3358 
3359 static int read_flow_counters(struct ib_device *ibdev,
3360 			      struct mlx5_read_counters_attr *read_attr)
3361 {
3362 	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3363 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3364 
3365 	return mlx5_fc_query(dev->mdev, fc,
3366 			     &read_attr->out[IB_COUNTER_PACKETS],
3367 			     &read_attr->out[IB_COUNTER_BYTES]);
3368 }
3369 
3370 /* flow counters currently expose two counters: packets and bytes */
3371 #define FLOW_COUNTERS_NUM 2
3372 static int counters_set_description(struct ib_counters *counters,
3373 				    enum mlx5_ib_counters_type counters_type,
3374 				    struct mlx5_ib_flow_counters_desc *desc_data,
3375 				    u32 ncounters)
3376 {
3377 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3378 	u32 cntrs_max_index = 0;
3379 	int i;
3380 
3381 	if (counters_type != MLX5_IB_COUNTERS_FLOW)
3382 		return -EINVAL;
3383 
3384 	/* init the fields for the object */
3385 	mcounters->type = counters_type;
3386 	mcounters->read_counters = read_flow_counters;
3387 	mcounters->counters_num = FLOW_COUNTERS_NUM;
3388 	mcounters->ncounters = ncounters;
3389 	/* each counter entry has both a description and an index */
3390 	for (i = 0; i < ncounters; i++) {
3391 		if (desc_data[i].description > IB_COUNTER_BYTES)
3392 			return -EINVAL;
3393 
3394 		if (cntrs_max_index <= desc_data[i].index)
3395 			cntrs_max_index = desc_data[i].index + 1;
3396 	}
3397 
3398 	mutex_lock(&mcounters->mcntrs_mutex);
3399 	mcounters->counters_data = desc_data;
3400 	mcounters->cntrs_max_index = cntrs_max_index;
3401 	mutex_unlock(&mcounters->mcntrs_mutex);
3402 
3403 	return 0;
3404 }
3405 
3406 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
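/*
 * Bind the user-provided counter descriptions to the counters object,
 * creating the HW flow counter on first use.
 */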
3407 static int flow_counters_set_data(struct ib_counters *ibcounters,
3408 				  struct mlx5_ib_create_flow *ucmd)
3409 {
3410 	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3411 	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3412 	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3413 	bool hw_hndl = false;
3414 	int ret = 0;
3415 
3416 	if (ucmd && ucmd->ncounters_data != 0) {
3417 		cntrs_data = ucmd->data;
3418 		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3419 			return -EINVAL;
3420 
3421 		desc_data = kcalloc(cntrs_data->ncounters,
3422 				    sizeof(*desc_data),
3423 				    GFP_KERNEL);
3424 		if (!desc_data)
3425 			return -ENOMEM;
3426 
3427 		if (copy_from_user(desc_data,
3428 				   u64_to_user_ptr(cntrs_data->counters_data),
3429 				   sizeof(*desc_data) * cntrs_data->ncounters)) {
3430 			ret = -EFAULT;
3431 			goto free;
3432 		}
3433 	}
3434 
3435 	if (!mcounters->hw_cntrs_hndl) {
3436 		mcounters->hw_cntrs_hndl = mlx5_fc_create(
3437 			to_mdev(ibcounters->device)->mdev, false);
3438 		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3439 			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3440 			goto free;
3441 		}
3442 		hw_hndl = true;
3443 	}
3444 
3445 	if (desc_data) {
3446 		/* counters already bound to at least one flow */
3447 		if (mcounters->cntrs_max_index) {
3448 			ret = -EINVAL;
3449 			goto free_hndl;
3450 		}
3451 
3452 		ret = counters_set_description(ibcounters,
3453 					       MLX5_IB_COUNTERS_FLOW,
3454 					       desc_data,
3455 					       cntrs_data->ncounters);
3456 		if (ret)
3457 			goto free_hndl;
3458 
3459 	} else if (!mcounters->cntrs_max_index) {
3460 		/* counters not bound yet, must have udata passed */
3461 		ret = -EINVAL;
3462 		goto free_hndl;
3463 	}
3464 
3465 	return 0;
3466 
3467 free_hndl:
3468 	if (hw_hndl) {
3469 		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3470 				mcounters->hw_cntrs_hndl);
3471 		mcounters->hw_cntrs_hndl = NULL;
3472 	}
3473 free:
3474 	kfree(desc_data);
3475 	return ret;
3476 }
3477 
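/*
 * Restrict the rule to traffic coming from the representor's vport: match
 * on the vport metadata in reg_c_0 when the eswitch tags packets with it,
 * otherwise on the misc source_port field.
 */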
3478 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
3479 					 struct mlx5_flow_spec *spec,
3480 					 struct mlx5_eswitch_rep *rep)
3481 {
3482 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
3483 	void *misc;
3484 
3485 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
3486 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3487 				    misc_parameters_2);
3488 
3489 		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3490 			 mlx5_eswitch_get_vport_metadata_for_match(esw,
3491 								   rep->vport));
3492 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3493 				    misc_parameters_2);
3494 
3495 		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
3496 	} else {
3497 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3498 				    misc_parameters);
3499 
3500 		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
3501 
3502 		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3503 				    misc_parameters);
3504 
3505 		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3506 	}
3507 }
3508 
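/*
 * Translate a verbs flow attribute into a mlx5 flow rule.  The flow specs
 * follow the attribute in memory and are walked by their self-declared
 * size, e.g. for num_of_specs == 2 (layout sketch only):
 *
 *	struct ib_flow_attr         attr;
 *	struct ib_flow_spec_eth     eth;	// ((union ib_flow_spec *)p)->size
 *	struct ib_flow_spec_tcp_udp tcp;
 *
 * On success the returned handler holds a reference on ft_prio; on failure
 * counters bound through flow_counters_set_data() are unwound under free:.
 */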
3509 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3510 						      struct mlx5_ib_flow_prio *ft_prio,
3511 						      const struct ib_flow_attr *flow_attr,
3512 						      struct mlx5_flow_destination *dst,
3513 						      u32 underlay_qpn,
3514 						      struct mlx5_ib_create_flow *ucmd)
3515 {
3516 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
3517 	struct mlx5_ib_flow_handler *handler;
3518 	struct mlx5_flow_act flow_act = {};
3519 	struct mlx5_flow_spec *spec;
3520 	struct mlx5_flow_destination dest_arr[2] = {};
3521 	struct mlx5_flow_destination *rule_dst = dest_arr;
3522 	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3523 	unsigned int spec_index;
3524 	u32 prev_type = 0;
3525 	int err = 0;
3526 	int dest_num = 0;
3527 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3528 
3529 	if (!is_valid_attr(dev->mdev, flow_attr))
3530 		return ERR_PTR(-EINVAL);
3531 
3532 	if (dev->is_rep && is_egress)
3533 		return ERR_PTR(-EINVAL);
3534 
3535 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3536 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3537 	if (!handler || !spec) {
3538 		err = -ENOMEM;
3539 		goto free;
3540 	}
3541 
3542 	INIT_LIST_HEAD(&handler->list);
3543 	if (dst) {
3544 		memcpy(&dest_arr[0], dst, sizeof(*dst));
3545 		dest_num++;
3546 	}
3547 
3548 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3549 		err = parse_flow_attr(dev->mdev, spec,
3550 				      ib_flow, flow_attr, &flow_act,
3551 				      prev_type);
3552 		if (err < 0)
3553 			goto free;
3554 
3555 		prev_type = ((union ib_flow_spec *)ib_flow)->type;
3556 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3557 	}
3558 
3559 	if (!flow_is_multicast_only(flow_attr))
3560 		set_underlay_qp(dev, spec, underlay_qpn);
3561 
3562 	if (dev->is_rep) {
3563 		struct mlx5_eswitch_rep *rep;
3564 
3565 		rep = dev->port[flow_attr->port - 1].rep;
3566 		if (!rep) {
3567 			err = -EINVAL;
3568 			goto free;
3569 		}
3570 
3571 		mlx5_ib_set_rule_source_port(dev, spec, rep);
3572 	}
3573 
3574 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3575 
3576 	if (is_egress &&
3577 	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3578 		err = -EINVAL;
3579 		goto free;
3580 	}
3581 
3582 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3583 		struct mlx5_ib_mcounters *mcounters;
3584 
3585 		err = flow_counters_set_data(flow_act.counters, ucmd);
3586 		if (err)
3587 			goto free;
3588 
3589 		mcounters = to_mcounters(flow_act.counters);
3590 		handler->ibcounters = flow_act.counters;
3591 		dest_arr[dest_num].type =
3592 			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3593 		dest_arr[dest_num].counter_id =
3594 			mlx5_fc_id(mcounters->hw_cntrs_hndl);
3595 		dest_num++;
3596 	}
3597 
3598 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3599 		if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3600 			rule_dst = NULL;
3601 			dest_num = 0;
3602 		}
3603 	} else {
3604 		if (is_egress)
3605 			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3606 		else
3607 			flow_act.action |=
3608 				dest_num ?  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3609 					MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3610 	}
3611 
3612 	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG)  &&
3613 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3614 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3615 		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
3616 			     spec->flow_context.flow_tag, flow_attr->type);
3617 		err = -EINVAL;
3618 		goto free;
3619 	}
3620 	handler->rule = mlx5_add_flow_rules(ft, spec,
3621 					    &flow_act,
3622 					    rule_dst, dest_num);
3623 
3624 	if (IS_ERR(handler->rule)) {
3625 		err = PTR_ERR(handler->rule);
3626 		goto free;
3627 	}
3628 
3629 	ft_prio->refcount++;
3630 	handler->prio = ft_prio;
3631 	handler->dev = dev;
3632 
3633 	ft_prio->flow_table = ft;
3634 free:
3635 	if (err && handler) {
3636 		if (handler->ibcounters &&
3637 		    atomic_read(&handler->ibcounters->usecnt) == 1)
3638 			counters_clear_description(handler->ibcounters);
3639 		kfree(handler);
3640 	}
3641 	kvfree(spec);
3642 	return err ? ERR_PTR(err) : handler;
3643 }
3644 
3645 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3646 						     struct mlx5_ib_flow_prio *ft_prio,
3647 						     const struct ib_flow_attr *flow_attr,
3648 						     struct mlx5_flow_destination *dst)
3649 {
3650 	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3651 }
3652 
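/*
 * DONT_TRAP installs the same match twice: once with no destination (so the
 * packet continues to the next priority) and once forwarding to the given
 * destination.  The two handlers are chained so they are destroyed together.
 */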
3653 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3654 							  struct mlx5_ib_flow_prio *ft_prio,
3655 							  struct ib_flow_attr *flow_attr,
3656 							  struct mlx5_flow_destination *dst)
3657 {
3658 	struct mlx5_ib_flow_handler *handler_dst = NULL;
3659 	struct mlx5_ib_flow_handler *handler = NULL;
3660 
3661 	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3662 	if (!IS_ERR(handler)) {
3663 		handler_dst = create_flow_rule(dev, ft_prio,
3664 					       flow_attr, dst);
3665 		if (IS_ERR(handler_dst)) {
3666 			mlx5_del_flow_rules(handler->rule);
3667 			ft_prio->refcount--;
3668 			kfree(handler);
3669 			handler = handler_dst;
3670 		} else {
3671 			list_add(&handler_dst->list, &handler->list);
3672 		}
3673 	}
3674 
3675 	return handler;
3676 }
3677 enum {
3678 	LEFTOVERS_MC,
3679 	LEFTOVERS_UC,
3680 };
3681 
3682 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3683 							  struct mlx5_ib_flow_prio *ft_prio,
3684 							  struct ib_flow_attr *flow_attr,
3685 							  struct mlx5_flow_destination *dst)
3686 {
3687 	struct mlx5_ib_flow_handler *handler_ucast = NULL;
3688 	struct mlx5_ib_flow_handler *handler = NULL;
3689 
3690 	static struct {
3691 		struct ib_flow_attr	flow_attr;
3692 		struct ib_flow_spec_eth eth_flow;
3693 	} leftovers_specs[] = {
3694 		[LEFTOVERS_MC] = {
3695 			.flow_attr = {
3696 				.num_of_specs = 1,
3697 				.size = sizeof(leftovers_specs[0])
3698 			},
3699 			.eth_flow = {
3700 				.type = IB_FLOW_SPEC_ETH,
3701 				.size = sizeof(struct ib_flow_spec_eth),
3702 				.mask = {.dst_mac = {0x1} },
3703 				.val =  {.dst_mac = {0x1} }
3704 			}
3705 		},
3706 		[LEFTOVERS_UC] = {
3707 			.flow_attr = {
3708 				.num_of_specs = 1,
3709 				.size = sizeof(leftovers_specs[0])
3710 			},
3711 			.eth_flow = {
3712 				.type = IB_FLOW_SPEC_ETH,
3713 				.size = sizeof(struct ib_flow_spec_eth),
3714 				.mask = {.dst_mac = {0x1} },
3715 				.val = {.dst_mac = {} }
3716 			}
3717 		}
3718 	};
3719 
3720 	handler = create_flow_rule(dev, ft_prio,
3721 				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
3722 				   dst);
3723 	if (!IS_ERR(handler) &&
3724 	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3725 		handler_ucast = create_flow_rule(dev, ft_prio,
3726 						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
3727 						 dst);
3728 		if (IS_ERR(handler_ucast)) {
3729 			mlx5_del_flow_rules(handler->rule);
3730 			ft_prio->refcount--;
3731 			kfree(handler);
3732 			handler = handler_ucast;
3733 		} else {
3734 			list_add(&handler_ucast->list, &handler->list);
3735 		}
3736 	}
3737 
3738 	return handler;
3739 }
3740 
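/*
 * Sniffer QPs receive a copy of all traffic: add one catch-all rule (zero
 * specs) in the RX table and one in the TX table, chaining the handlers so
 * freeing the RX handler also tears down the TX rule.
 */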
3741 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3742 							struct mlx5_ib_flow_prio *ft_rx,
3743 							struct mlx5_ib_flow_prio *ft_tx,
3744 							struct mlx5_flow_destination *dst)
3745 {
3746 	struct mlx5_ib_flow_handler *handler_rx;
3747 	struct mlx5_ib_flow_handler *handler_tx;
3748 	int err;
3749 	static const struct ib_flow_attr flow_attr  = {
3750 		.num_of_specs = 0,
3751 		.size = sizeof(flow_attr)
3752 	};
3753 
3754 	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3755 	if (IS_ERR(handler_rx)) {
3756 		err = PTR_ERR(handler_rx);
3757 		goto err;
3758 	}
3759 
3760 	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3761 	if (IS_ERR(handler_tx)) {
3762 		err = PTR_ERR(handler_tx);
3763 		goto err_tx;
3764 	}
3765 
3766 	list_add(&handler_tx->list, &handler_rx->list);
3767 
3768 	return handler_rx;
3769 
3770 err_tx:
3771 	mlx5_del_flow_rules(handler_rx->rule);
3772 	ft_rx->refcount--;
3773 	kfree(handler_rx);
3774 err:
3775 	return ERR_PTR(err);
3776 }
3777 
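/*
 * ib_create_flow entry point.  User data, when present, must begin with
 * struct mlx5_ib_create_flow, may carry at most one trailing
 * mlx5_ib_flow_counters_data block, and any bytes past the expected size
 * must be zero (forward compatibility):
 *
 *	required_ucmd_sz = min_ucmd_sz +
 *		ncounters_data * sizeof(struct mlx5_ib_flow_counters_data);
 */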
3778 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3779 					   struct ib_flow_attr *flow_attr,
3780 					   int domain,
3781 					   struct ib_udata *udata)
3782 {
3783 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3784 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3785 	struct mlx5_ib_flow_handler *handler = NULL;
3786 	struct mlx5_flow_destination *dst = NULL;
3787 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3788 	struct mlx5_ib_flow_prio *ft_prio;
3789 	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3790 	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3791 	size_t min_ucmd_sz, required_ucmd_sz;
3792 	int err;
3793 	int underlay_qpn;
3794 
3795 	if (udata && udata->inlen) {
3796 		min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3797 				sizeof(ucmd_hdr.reserved);
3798 		if (udata->inlen < min_ucmd_sz)
3799 			return ERR_PTR(-EOPNOTSUPP);
3800 
3801 		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3802 		if (err)
3803 			return ERR_PTR(err);
3804 
3805 		/* currently only one block of counters data is supported */
3806 		if (ucmd_hdr.ncounters_data > 1)
3807 			return ERR_PTR(-EINVAL);
3808 
3809 		required_ucmd_sz = min_ucmd_sz +
3810 			sizeof(struct mlx5_ib_flow_counters_data) *
3811 			ucmd_hdr.ncounters_data;
3812 		if (udata->inlen > required_ucmd_sz &&
3813 		    !ib_is_udata_cleared(udata, required_ucmd_sz,
3814 					 udata->inlen - required_ucmd_sz))
3815 			return ERR_PTR(-EOPNOTSUPP);
3816 
3817 		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3818 		if (!ucmd)
3819 			return ERR_PTR(-ENOMEM);
3820 
3821 		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3822 		if (err)
3823 			goto free_ucmd;
3824 	}
3825 
3826 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3827 		err = -ENOMEM;
3828 		goto free_ucmd;
3829 	}
3830 
3831 	if (domain != IB_FLOW_DOMAIN_USER ||
3832 	    flow_attr->port > dev->num_ports ||
3833 	    (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3834 				  IB_FLOW_ATTR_FLAGS_EGRESS))) {
3835 		err = -EINVAL;
3836 		goto free_ucmd;
3837 	}
3838 
3839 	if (is_egress &&
3840 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3841 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3842 		err = -EINVAL;
3843 		goto free_ucmd;
3844 	}
3845 
3846 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3847 	if (!dst) {
3848 		err = -ENOMEM;
3849 		goto free_ucmd;
3850 	}
3851 
3852 	mutex_lock(&dev->flow_db->lock);
3853 
3854 	ft_prio = get_flow_table(dev, flow_attr,
3855 				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3856 	if (IS_ERR(ft_prio)) {
3857 		err = PTR_ERR(ft_prio);
3858 		goto unlock;
3859 	}
3860 	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3861 		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3862 		if (IS_ERR(ft_prio_tx)) {
3863 			err = PTR_ERR(ft_prio_tx);
3864 			ft_prio_tx = NULL;
3865 			goto destroy_ft;
3866 		}
3867 	}
3868 
3869 	if (is_egress) {
3870 		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3871 	} else {
3872 		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3873 		if (mqp->flags & MLX5_IB_QP_RSS)
3874 			dst->tir_num = mqp->rss_qp.tirn;
3875 		else
3876 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3877 	}
3878 
3879 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3880 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
3881 			handler = create_dont_trap_rule(dev, ft_prio,
3882 							flow_attr, dst);
3883 		} else {
3884 			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3885 					mqp->underlay_qpn : 0;
3886 			handler = _create_flow_rule(dev, ft_prio, flow_attr,
3887 						    dst, underlay_qpn, ucmd);
3888 		}
3889 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3890 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3891 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3892 						dst);
3893 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3894 		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3895 	} else {
3896 		err = -EINVAL;
3897 		goto destroy_ft;
3898 	}
3899 
3900 	if (IS_ERR(handler)) {
3901 		err = PTR_ERR(handler);
3902 		handler = NULL;
3903 		goto destroy_ft;
3904 	}
3905 
3906 	mutex_unlock(&dev->flow_db->lock);
3907 	kfree(dst);
3908 	kfree(ucmd);
3909 
3910 	return &handler->ibflow;
3911 
3912 destroy_ft:
3913 	put_flow_table(dev, ft_prio, false);
3914 	if (ft_prio_tx)
3915 		put_flow_table(dev, ft_prio_tx, false);
3916 unlock:
3917 	mutex_unlock(&dev->flow_db->lock);
3918 	kfree(dst);
3919 free_ucmd:
3920 	kfree(ucmd);
3921 	return ERR_PTR(err);
3922 }
3923 
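/*
 * Pick (and lazily create) the flow table priority for a matcher based
 * rule.  Table size and tunnel decap/reformat flags depend on the namespace
 * type (BYPASS, EGRESS, FDB or RDMA_RX) and on whether eswitch encap mode
 * owns the reformat capabilities.
 */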
3924 static struct mlx5_ib_flow_prio *
3925 _get_flow_table(struct mlx5_ib_dev *dev,
3926 		struct mlx5_ib_flow_matcher *fs_matcher,
3927 		bool mcast)
3928 {
3929 	struct mlx5_flow_namespace *ns = NULL;
3930 	struct mlx5_ib_flow_prio *prio = NULL;
3931 	int max_table_size = 0;
3932 	bool esw_encap;
3933 	u32 flags = 0;
3934 	int priority;
3935 
3936 	if (mcast)
3937 		priority = MLX5_IB_FLOW_MCAST_PRIO;
3938 	else
3939 		priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3940 
3941 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3942 		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
3943 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3944 		max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3945 					log_max_ft_size));
3946 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
3947 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3948 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3949 					      reformat_l3_tunnel_to_l2) &&
3950 		    !esw_encap)
3951 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3952 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
3953 		max_table_size = BIT(
3954 			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
3955 		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
3956 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3957 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3958 		max_table_size = BIT(
3959 			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
3960 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
3961 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3962 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
3963 		    esw_encap)
3964 			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3965 		priority = FDB_BYPASS_PATH;
3966 	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
3967 		max_table_size =
3968 			BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
3969 						       log_max_ft_size));
3970 		priority = fs_matcher->priority;
3971 	}
3972 
3973 	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
3974 
3975 	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3976 	if (!ns)
3977 		return ERR_PTR(-ENOTSUPP);
3978 
3979 	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3980 		prio = &dev->flow_db->prios[priority];
3981 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
3982 		prio = &dev->flow_db->egress_prios[priority];
3983 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
3984 		prio = &dev->flow_db->fdb;
3985 	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
3986 		prio = &dev->flow_db->rdma_rx[priority];
3987 
3988 	if (!prio)
3989 		return ERR_PTR(-EINVAL);
3990 
3991 	if (prio->flow_table)
3992 		return prio;
3993 
3994 	return _get_prio(ns, prio, priority, max_table_size,
3995 			 MLX5_FS_MAX_TYPES, flags);
3996 }
3997 
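/*
 * Build a rule from a raw fte_match_param blob: the blob supplies the match
 * value while the mask and criteria-enable bits come from the pre-created
 * flow matcher.
 */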
3998 static struct mlx5_ib_flow_handler *
3999 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
4000 		      struct mlx5_ib_flow_prio *ft_prio,
4001 		      struct mlx5_flow_destination *dst,
4002 		      struct mlx5_ib_flow_matcher  *fs_matcher,
4003 		      struct mlx5_flow_context *flow_context,
4004 		      struct mlx5_flow_act *flow_act,
4005 		      void *cmd_in, int inlen,
4006 		      int dst_num)
4007 {
4008 	struct mlx5_ib_flow_handler *handler;
4009 	struct mlx5_flow_spec *spec;
4010 	struct mlx5_flow_table *ft = ft_prio->flow_table;
4011 	int err = 0;
4012 
4013 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
4014 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
4015 	if (!handler || !spec) {
4016 		err = -ENOMEM;
4017 		goto free;
4018 	}
4019 
4020 	INIT_LIST_HEAD(&handler->list);
4021 
4022 	memcpy(spec->match_value, cmd_in, inlen);
4023 	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
4024 	       fs_matcher->mask_len);
4025 	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
4026 	spec->flow_context = *flow_context;
4027 
4028 	handler->rule = mlx5_add_flow_rules(ft, spec,
4029 					    flow_act, dst, dst_num);
4030 
4031 	if (IS_ERR(handler->rule)) {
4032 		err = PTR_ERR(handler->rule);
4033 		goto free;
4034 	}
4035 
4036 	ft_prio->refcount++;
4037 	handler->prio = ft_prio;
4038 	handler->dev = dev;
4039 	ft_prio->flow_table = ft;
4040 
4041 free:
4042 	if (err)
4043 		kfree(handler);
4044 	kvfree(spec);
4045 	return err ? ERR_PTR(err) : handler;
4046 }
4047 
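/*
 * A raw rule counts as multicast when the outer headers match a multicast
 * Ethernet DMAC or an IPv4 multicast destination (224.0.0.0/4), in both the
 * value and the mask.
 */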
4048 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
4049 				void *match_v)
4050 {
4051 	void *match_c;
4052 	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
4053 	void *dmac, *dmac_mask;
4054 	void *ipv4, *ipv4_mask;
4055 
4056 	if (!(fs_matcher->match_criteria_enable &
4057 	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
4058 		return false;
4059 
4060 	match_c = fs_matcher->matcher_mask.match_params;
4061 	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
4062 					   outer_headers);
4063 	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
4064 					   outer_headers);
4065 
4066 	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4067 			    dmac_47_16);
4068 	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4069 				 dmac_47_16);
4070 
4071 	if (is_multicast_ether_addr(dmac) &&
4072 	    is_multicast_ether_addr(dmac_mask))
4073 		return true;
4074 
4075 	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4076 			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4077 
4078 	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4079 				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4080 
4081 	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
4082 	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
4083 		return true;
4084 
4085 	return false;
4086 }
4087 
4088 struct mlx5_ib_flow_handler *
4089 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
4090 			struct mlx5_ib_flow_matcher *fs_matcher,
4091 			struct mlx5_flow_context *flow_context,
4092 			struct mlx5_flow_act *flow_act,
4093 			u32 counter_id,
4094 			void *cmd_in, int inlen, int dest_id,
4095 			int dest_type)
4096 {
4097 	struct mlx5_flow_destination *dst;
4098 	struct mlx5_ib_flow_prio *ft_prio;
4099 	struct mlx5_ib_flow_handler *handler;
4100 	int dst_num = 0;
4101 	bool mcast;
4102 	int err;
4103 
4104 	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
4105 		return ERR_PTR(-EOPNOTSUPP);
4106 
4107 	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
4108 		return ERR_PTR(-ENOMEM);
4109 
4110 	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
4111 	if (!dst)
4112 		return ERR_PTR(-ENOMEM);
4113 
4114 	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
4115 	mutex_lock(&dev->flow_db->lock);
4116 
4117 	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
4118 	if (IS_ERR(ft_prio)) {
4119 		err = PTR_ERR(ft_prio);
4120 		goto unlock;
4121 	}
4122 
4123 	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
4124 		dst[dst_num].type = dest_type;
4125 		dst[dst_num].tir_num = dest_id;
4126 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4127 	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
4128 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
4129 		dst[dst_num].ft_num = dest_id;
4130 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4131 	} else {
4132 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
4133 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
4134 	}
4135 
4136 	dst_num++;
4137 
4138 	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
4139 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
4140 		dst[dst_num].counter_id = counter_id;
4141 		dst_num++;
4142 	}
4143 
4144 	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
4145 					flow_context, flow_act,
4146 					cmd_in, inlen, dst_num);
4147 
4148 	if (IS_ERR(handler)) {
4149 		err = PTR_ERR(handler);
4150 		goto destroy_ft;
4151 	}
4152 
4153 	mutex_unlock(&dev->flow_db->lock);
4154 	atomic_inc(&fs_matcher->usecnt);
4155 	handler->flow_matcher = fs_matcher;
4156 
4157 	kfree(dst);
4158 
4159 	return handler;
4160 
4161 destroy_ft:
4162 	put_flow_table(dev, ft_prio, false);
4163 unlock:
4164 	mutex_unlock(&dev->flow_db->lock);
4165 	kfree(dst);
4166 
4167 	return ERR_PTR(err);
4168 }
4169 
4170 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
4171 {
4172 	u32 flags = 0;
4173 
4174 	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
4175 		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
4176 
4177 	return flags;
4178 }
4179 
4180 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
4181 static struct ib_flow_action *
4182 mlx5_ib_create_flow_action_esp(struct ib_device *device,
4183 			       const struct ib_flow_action_attrs_esp *attr,
4184 			       struct uverbs_attr_bundle *attrs)
4185 {
4186 	struct mlx5_ib_dev *mdev = to_mdev(device);
4187 	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
4188 	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
4189 	struct mlx5_ib_flow_action *action;
4190 	u64 action_flags;
4191 	u64 flags;
4192 	int err = 0;
4193 
4194 	err = uverbs_get_flags64(
4195 		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4196 		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
4197 	if (err)
4198 		return ERR_PTR(err);
4199 
4200 	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
4201 
4202 	/* We currently support only a subset of the standard features: only a
4203 	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and ESN
4204 	 * (with overlap). Full offload mode isn't supported.
4205 	 */
4206 	if (!attr->keymat || attr->replay || attr->encap ||
4207 	    attr->spi || attr->seq || attr->tfc_pad ||
4208 	    attr->hard_limit_pkts ||
4209 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4210 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
4211 		return ERR_PTR(-EOPNOTSUPP);
4212 
4213 	if (attr->keymat->protocol !=
4214 	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
4215 		return ERR_PTR(-EOPNOTSUPP);
4216 
4217 	aes_gcm = &attr->keymat->keymat.aes_gcm;
4218 
4219 	if (aes_gcm->icv_len != 16 ||
4220 	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
4221 		return ERR_PTR(-EOPNOTSUPP);
4222 
4223 	action = kmalloc(sizeof(*action), GFP_KERNEL);
4224 	if (!action)
4225 		return ERR_PTR(-ENOMEM);
4226 
4227 	action->esp_aes_gcm.ib_flags = attr->flags;
4228 	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
4229 	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
4230 	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
4231 	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
4232 	       sizeof(accel_attrs.keymat.aes_gcm.salt));
4233 	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
4234 	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
4235 	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
4236 	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
4237 	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
4238 
4239 	accel_attrs.esn = attr->esn;
4240 	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
4241 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
4242 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4243 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4244 
4245 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
4246 		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
4247 
4248 	action->esp_aes_gcm.ctx =
4249 		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
4250 	if (IS_ERR(action->esp_aes_gcm.ctx)) {
4251 		err = PTR_ERR(action->esp_aes_gcm.ctx);
4252 		goto err_parse;
4253 	}
4254 
4255 	action->esp_aes_gcm.ib_flags = attr->flags;
4256 
4257 	return &action->ib_action;
4258 
4259 err_parse:
4260 	kfree(action);
4261 	return ERR_PTR(err);
4262 }
4263 
4264 static int
4265 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
4266 			       const struct ib_flow_action_attrs_esp *attr,
4267 			       struct uverbs_attr_bundle *attrs)
4268 {
4269 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4270 	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
4271 	int err = 0;
4272 
4273 	if (attr->keymat || attr->replay || attr->encap ||
4274 	    attr->spi || attr->seq || attr->tfc_pad ||
4275 	    attr->hard_limit_pkts ||
4276 	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4277 			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
4278 			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
4279 		return -EOPNOTSUPP;
4280 
4281 	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
4282 	 * be modified.
4283 	 */
4284 	if (!(maction->esp_aes_gcm.ib_flags &
4285 	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
4286 	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4287 			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4288 		return -EINVAL;
4289 
4290 	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4291 	       sizeof(accel_attrs));
4292 
4293 	accel_attrs.esn = attr->esn;
4294 	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4295 		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4296 	else
4297 		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4298 
4299 	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4300 					 &accel_attrs);
4301 	if (err)
4302 		return err;
4303 
4304 	maction->esp_aes_gcm.ib_flags &=
4305 		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4306 	maction->esp_aes_gcm.ib_flags |=
4307 		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4308 
4309 	return 0;
4310 }
4311 
4312 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4313 {
4314 	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4315 
4316 	switch (action->type) {
4317 	case IB_FLOW_ACTION_ESP:
4318 		/*
4319 		 * We only support aes_gcm for now, so we implicitly know this
4320 		 * is the underlying crypto.
4321 		 */
4322 		mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4323 		break;
4324 	case IB_FLOW_ACTION_UNSPECIFIED:
4325 		mlx5_ib_destroy_flow_action_raw(maction);
4326 		break;
4327 	default:
4328 		WARN_ON(true);
4329 		break;
4330 	}
4331 
4332 	kfree(maction);
4333 	return 0;
4334 }
4335 
4336 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
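/*
 * Multicast attach/detach are forwarded directly to FW; the PD uid (0 for
 * kernel consumers) scopes the command to the issuing user context.
 */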
4337 {
4338 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4339 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4340 	int err;
4341 	u16 uid;
4342 
4343 	uid = ibqp->pd ?
4344 		to_mpd(ibqp->pd)->uid : 0;
4345 
4346 	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4347 		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
4348 		return -EOPNOTSUPP;
4349 	}
4350 
4351 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4352 	if (err)
4353 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4354 			     ibqp->qp_num, gid->raw);
4355 
4356 	return err;
4357 }
4358 
4359 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4360 {
4361 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4362 	int err;
4363 	u16 uid;
4364 
4365 	uid = ibqp->pd ?
4366 		to_mpd(ibqp->pd)->uid : 0;
4367 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4368 	if (err)
4369 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4370 			     ibqp->qp_num, gid->raw);
4371 
4372 	return err;
4373 }
4374 
4375 static int init_node_data(struct mlx5_ib_dev *dev)
4376 {
4377 	int err;
4378 
4379 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
4380 	if (err)
4381 		return err;
4382 
4383 	dev->mdev->rev_id = dev->mdev->pdev->revision;
4384 
4385 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
4386 }
4387 
4388 static ssize_t fw_pages_show(struct device *device,
4389 			     struct device_attribute *attr, char *buf)
4390 {
4391 	struct mlx5_ib_dev *dev =
4392 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4393 
4394 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
4395 }
4396 static DEVICE_ATTR_RO(fw_pages);
4397 
4398 static ssize_t reg_pages_show(struct device *device,
4399 			      struct device_attribute *attr, char *buf)
4400 {
4401 	struct mlx5_ib_dev *dev =
4402 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4403 
4404 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
4405 }
4406 static DEVICE_ATTR_RO(reg_pages);
4407 
4408 static ssize_t hca_type_show(struct device *device,
4409 			     struct device_attribute *attr, char *buf)
4410 {
4411 	struct mlx5_ib_dev *dev =
4412 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4413 
4414 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
4415 }
4416 static DEVICE_ATTR_RO(hca_type);
4417 
4418 static ssize_t hw_rev_show(struct device *device,
4419 			   struct device_attribute *attr, char *buf)
4420 {
4421 	struct mlx5_ib_dev *dev =
4422 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4423 
4424 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
4425 }
4426 static DEVICE_ATTR_RO(hw_rev);
4427 
4428 static ssize_t board_id_show(struct device *device,
4429 			     struct device_attribute *attr, char *buf)
4430 {
4431 	struct mlx5_ib_dev *dev =
4432 		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4433 
4434 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
4435 		       dev->mdev->board_id);
4436 }
4437 static DEVICE_ATTR_RO(board_id);
4438 
4439 static struct attribute *mlx5_class_attributes[] = {
4440 	&dev_attr_hw_rev.attr,
4441 	&dev_attr_hca_type.attr,
4442 	&dev_attr_board_id.attr,
4443 	&dev_attr_fw_pages.attr,
4444 	&dev_attr_reg_pages.attr,
4445 	NULL,
4446 };
4447 
4448 static const struct attribute_group mlx5_attr_group = {
4449 	.attrs = mlx5_class_attributes,
4450 };
4451 
4452 static void pkey_change_handler(struct work_struct *work)
4453 {
4454 	struct mlx5_ib_port_resources *ports =
4455 		container_of(work, struct mlx5_ib_port_resources,
4456 			     pkey_change_work);
4457 
4458 	mutex_lock(&ports->devr->mutex);
4459 	mlx5_ib_gsi_pkey_change(ports->gsi);
4460 	mutex_unlock(&ports->devr->mutex);
4461 }
4462 
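/*
 * On a fatal device error, collect every CQ that may still have consumers
 * waiting on outstanding work requests and invoke its completion handler,
 * so pollers observe the error state instead of waiting forever.
 */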
4463 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4464 {
4465 	struct mlx5_ib_qp *mqp;
4466 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
4467 	struct mlx5_core_cq *mcq;
4468 	struct list_head cq_armed_list;
4469 	unsigned long flags_qp;
4470 	unsigned long flags_cq;
4471 	unsigned long flags;
4472 
4473 	INIT_LIST_HEAD(&cq_armed_list);
4474 
4475 	/* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
4476 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4477 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4478 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4479 		if (mqp->sq.tail != mqp->sq.head) {
4480 			send_mcq = to_mcq(mqp->ibqp.send_cq);
4481 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
4482 			if (send_mcq->mcq.comp &&
4483 			    mqp->ibqp.send_cq->comp_handler) {
4484 				if (!send_mcq->mcq.reset_notify_added) {
4485 					send_mcq->mcq.reset_notify_added = 1;
4486 					list_add_tail(&send_mcq->mcq.reset_notify,
4487 						      &cq_armed_list);
4488 				}
4489 			}
4490 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4491 		}
4492 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4493 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4494 		/* no handling is needed for SRQ */
4495 		if (!mqp->ibqp.srq) {
4496 			if (mqp->rq.tail != mqp->rq.head) {
4497 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4498 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4499 				if (recv_mcq->mcq.comp &&
4500 				    mqp->ibqp.recv_cq->comp_handler) {
4501 					if (!recv_mcq->mcq.reset_notify_added) {
4502 						recv_mcq->mcq.reset_notify_added = 1;
4503 						list_add_tail(&recv_mcq->mcq.reset_notify,
4504 							      &cq_armed_list);
4505 					}
4506 				}
4507 				spin_unlock_irqrestore(&recv_mcq->lock,
4508 						       flags_cq);
4509 			}
4510 		}
4511 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4512 	}
4513 	/* At this point all inflight post sends have been queued for execution,
4514 	 * since we locked/unlocked the locks above. Now arm all involved CQs.
4515 	 */
4516 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4517 		mcq->comp(mcq, NULL);
4518 	}
4519 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4520 }
4521 
4522 static void delay_drop_handler(struct work_struct *work)
4523 {
4524 	int err;
4525 	struct mlx5_ib_delay_drop *delay_drop =
4526 		container_of(work, struct mlx5_ib_delay_drop,
4527 			     delay_drop_work);
4528 
4529 	atomic_inc(&delay_drop->events_cnt);
4530 
4531 	mutex_lock(&delay_drop->lock);
4532 	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4533 				       delay_drop->timeout);
4534 	if (err) {
4535 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4536 			     delay_drop->timeout);
4537 		delay_drop->activate = false;
4538 	}
4539 	mutex_unlock(&delay_drop->lock);
4540 }
4541 
4542 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4543 				 struct ib_event *ibev)
4544 {
4545 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4546 
4547 	switch (eqe->sub_type) {
4548 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4549 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4550 					    IB_LINK_LAYER_ETHERNET)
4551 			schedule_work(&ibdev->delay_drop.delay_drop_work);
4552 		break;
4553 	default: /* do nothing */
4554 		return;
4555 	}
4556 }
4557 
4558 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4559 			      struct ib_event *ibev)
4560 {
4561 	u8 port = (eqe->data.port.port >> 4) & 0xf;
4562 
4563 	ibev->element.port_num = port;
4564 
4565 	switch (eqe->sub_type) {
4566 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4567 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4568 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4569 		/* In RoCE, port up/down events are handled in
4570 		 * mlx5_netdev_event().
4571 		 */
4572 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4573 					    IB_LINK_LAYER_ETHERNET)
4574 			return -EINVAL;
4575 
4576 		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4577 				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4578 		break;
4579 
4580 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
4581 		ibev->event = IB_EVENT_LID_CHANGE;
4582 		break;
4583 
4584 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4585 		ibev->event = IB_EVENT_PKEY_CHANGE;
4586 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4587 		break;
4588 
4589 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4590 		ibev->event = IB_EVENT_GID_CHANGE;
4591 		break;
4592 
4593 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4594 		ibev->event = IB_EVENT_CLIENT_REREGISTER;
4595 		break;
4596 	default:
4597 		return -EINVAL;
4598 	}
4599 
4600 	return 0;
4601 }
4602 
4603 static void mlx5_ib_handle_event(struct work_struct *_work)
4604 {
4605 	struct mlx5_ib_event_work *work =
4606 		container_of(_work, struct mlx5_ib_event_work, work);
4607 	struct mlx5_ib_dev *ibdev;
4608 	struct ib_event ibev;
4609 	bool fatal = false;
4610 
4611 	if (work->is_slave) {
4612 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
4613 		if (!ibdev)
4614 			goto out;
4615 	} else {
4616 		ibdev = work->dev;
4617 	}
4618 
4619 	switch (work->event) {
4620 	case MLX5_DEV_EVENT_SYS_ERROR:
4621 		ibev.event = IB_EVENT_DEVICE_FATAL;
4622 		mlx5_ib_handle_internal_error(ibdev);
4623 		ibev.element.port_num  = (u8)(unsigned long)work->param;
4624 		fatal = true;
4625 		break;
4626 	case MLX5_EVENT_TYPE_PORT_CHANGE:
4627 		if (handle_port_change(ibdev, work->param, &ibev))
4628 			goto out;
4629 		break;
4630 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
4631 		handle_general_event(ibdev, work->param, &ibev);
4632 		/* fall through */
4633 	default:
4634 		goto out;
4635 	}
4636 
4637 	ibev.device = &ibdev->ib_dev;
4638 
4639 	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4640 		mlx5_ib_warn(ibdev, "event on invalid port %d\n", ibev.element.port_num);
4641 		goto out;
4642 	}
4643 
4644 	if (ibdev->ib_active)
4645 		ib_dispatch_event(&ibev);
4646 
4647 	if (fatal)
4648 		ibdev->ib_active = false;
4649 out:
4650 	kfree(work);
4651 }
4652 
4653 static int mlx5_ib_event(struct notifier_block *nb,
4654 			 unsigned long event, void *param)
4655 {
4656 	struct mlx5_ib_event_work *work;
4657 
4658 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4659 	if (!work)
4660 		return NOTIFY_DONE;
4661 
4662 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4663 	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4664 	work->is_slave = false;
4665 	work->param = param;
4666 	work->event = event;
4667 
4668 	queue_work(mlx5_ib_event_wq, &work->work);
4669 
4670 	return NOTIFY_OK;
4671 }
4672 
4673 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4674 				    unsigned long event, void *param)
4675 {
4676 	struct mlx5_ib_event_work *work;
4677 
4678 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
4679 	if (!work)
4680 		return NOTIFY_DONE;
4681 
4682 	INIT_WORK(&work->work, mlx5_ib_handle_event);
4683 	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4684 	work->is_slave = true;
4685 	work->param = param;
4686 	work->event = event;
4687 	queue_work(mlx5_ib_event_wq, &work->work);
4688 
4689 	return NOTIFY_OK;
4690 }
4691 
4692 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4693 {
4694 	struct mlx5_hca_vport_context vport_ctx;
4695 	int err;
4696 	int port;
4697 
4698 	for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
4699 		dev->mdev->port_caps[port - 1].has_smi = false;
4700 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4701 		    MLX5_CAP_PORT_TYPE_IB) {
4702 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4703 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
4704 								   port, 0,
4705 								   &vport_ctx);
4706 				if (err) {
4707 					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4708 						    port, err);
4709 					return err;
4710 				}
4711 				dev->mdev->port_caps[port - 1].has_smi =
4712 					vport_ctx.has_smi;
4713 			} else {
4714 				dev->mdev->port_caps[port - 1].has_smi = true;
4715 			}
4716 		}
4717 	}
4718 	return 0;
4719 }
4720 
4721 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4722 {
4723 	int port;
4724 
4725 	for (port = 1; port <= dev->num_ports; port++)
4726 		mlx5_query_ext_port_caps(dev, port);
4727 }
4728 
4729 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4730 {
4731 	struct ib_device_attr *dprops = NULL;
4732 	struct ib_port_attr *pprops = NULL;
4733 	int err = -ENOMEM;
4734 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4735 
4736 	pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
4737 	if (!pprops)
4738 		goto out;
4739 
4740 	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4741 	if (!dprops)
4742 		goto out;
4743 
4744 	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4745 	if (err) {
4746 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
4747 		goto out;
4748 	}
4749 
4750 	err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4751 	if (err) {
4752 		mlx5_ib_warn(dev, "query_port %d failed %d\n",
4753 			     port, err);
4754 		goto out;
4755 	}
4756 
4757 	dev->mdev->port_caps[port - 1].pkey_table_len =
4758 					dprops->max_pkeys;
4759 	dev->mdev->port_caps[port - 1].gid_table_len =
4760 					pprops->gid_tbl_len;
4761 	mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4762 		    port, dprops->max_pkeys, pprops->gid_tbl_len);
4763 
4764 out:
4765 	kfree(pprops);
4766 	kfree(dprops);
4767 
4768 	return err;
4769 }
4770 
4771 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4772 {
4773 	/* For representors use port 1, as this is the only native
4774 	 * port.
4775 	 */
4776 	if (dev->is_rep)
4777 		return __get_port_caps(dev, 1);
4778 	return __get_port_caps(dev, port);
4779 }
4780 
4781 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4782 {
4783 	int err;
4784 
4785 	err = mlx5_mr_cache_cleanup(dev);
4786 	if (err)
4787 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4788 
4789 	if (dev->umrc.qp)
4790 		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
4791 	if (dev->umrc.cq)
4792 		ib_free_cq(dev->umrc.cq);
4793 	if (dev->umrc.pd)
4794 		ib_dealloc_pd(dev->umrc.pd);
4795 }
4796 
4797 enum {
4798 	MAX_UMR_WR = 128,
4799 };
4800 
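/*
 * Create the kernel resources backing UMR (user memory registration) work
 * requests: a PD, a softirq-polled CQ and a REG_UMR QP driven manually
 * through INIT -> RTR -> RTS.  The semaphore limits posters to MAX_UMR_WR
 * outstanding WRs.
 */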
4801 static int create_umr_res(struct mlx5_ib_dev *dev)
4802 {
4803 	struct ib_qp_init_attr *init_attr = NULL;
4804 	struct ib_qp_attr *attr = NULL;
4805 	struct ib_pd *pd;
4806 	struct ib_cq *cq;
4807 	struct ib_qp *qp;
4808 	int ret;
4809 
4810 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4811 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4812 	if (!attr || !init_attr) {
4813 		ret = -ENOMEM;
4814 		goto error_0;
4815 	}
4816 
4817 	pd = ib_alloc_pd(&dev->ib_dev, 0);
4818 	if (IS_ERR(pd)) {
4819 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4820 		ret = PTR_ERR(pd);
4821 		goto error_0;
4822 	}
4823 
4824 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4825 	if (IS_ERR(cq)) {
4826 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4827 		ret = PTR_ERR(cq);
4828 		goto error_2;
4829 	}
4830 
4831 	init_attr->send_cq = cq;
4832 	init_attr->recv_cq = cq;
4833 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4834 	init_attr->cap.max_send_wr = MAX_UMR_WR;
4835 	init_attr->cap.max_send_sge = 1;
4836 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4837 	init_attr->port_num = 1;
4838 	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4839 	if (IS_ERR(qp)) {
4840 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4841 		ret = PTR_ERR(qp);
4842 		goto error_3;
4843 	}
4844 	qp->device     = &dev->ib_dev;
4845 	qp->real_qp    = qp;
4846 	qp->uobject    = NULL;
4847 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4848 	qp->send_cq    = init_attr->send_cq;
4849 	qp->recv_cq    = init_attr->recv_cq;
4850 
4851 	attr->qp_state = IB_QPS_INIT;
4852 	attr->port_num = 1;
4853 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4854 				IB_QP_PORT, NULL);
4855 	if (ret) {
4856 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4857 		goto error_4;
4858 	}
4859 
4860 	memset(attr, 0, sizeof(*attr));
4861 	attr->qp_state = IB_QPS_RTR;
4862 	attr->path_mtu = IB_MTU_256;
4863 
4864 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4865 	if (ret) {
4866 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4867 		goto error_4;
4868 	}
4869 
4870 	memset(attr, 0, sizeof(*attr));
4871 	attr->qp_state = IB_QPS_RTS;
4872 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4873 	if (ret) {
4874 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4875 		goto error_4;
4876 	}
4877 
4878 	dev->umrc.qp = qp;
4879 	dev->umrc.cq = cq;
4880 	dev->umrc.pd = pd;
4881 
4882 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
4883 	ret = mlx5_mr_cache_init(dev);
4884 	if (ret) {
4885 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4886 		goto error_4;
4887 	}
4888 
4889 	kfree(attr);
4890 	kfree(init_attr);
4891 
4892 	return 0;
4893 
4894 error_4:
4895 	mlx5_ib_destroy_qp(qp, NULL);
4896 	dev->umrc.qp = NULL;
4897 
4898 error_3:
4899 	ib_free_cq(cq);
4900 	dev->umrc.cq = NULL;
4901 
4902 error_2:
4903 	ib_dealloc_pd(pd);
4904 	dev->umrc.pd = NULL;
4905 
4906 error_0:
4907 	kfree(attr);
4908 	kfree(init_attr);
4909 	return ret;
4910 }
4911 
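/* Map the FW umr_fence capability onto the fence mode used in UMR WQEs. */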
4912 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4913 {
4914 	switch (umr_fence_cap) {
4915 	case MLX5_CAP_UMR_FENCE_NONE:
4916 		return MLX5_FENCE_MODE_NONE;
4917 	case MLX5_CAP_UMR_FENCE_SMALL:
4918 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
4919 	default:
4920 		return MLX5_FENCE_MODE_STRONG_ORDERING;
4921 	}
4922 }
4923 
4924 static int create_dev_resources(struct mlx5_ib_resources *devr)
4925 {
4926 	struct ib_srq_init_attr attr;
4927 	struct mlx5_ib_dev *dev;
4928 	struct ib_device *ibdev;
4929 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
4930 	int port;
4931 	int ret = 0;
4932 
4933 	dev = container_of(devr, struct mlx5_ib_dev, devr);
4934 	ibdev = &dev->ib_dev;
4935 
4936 	mutex_init(&devr->mutex);
4937 
4938 	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
4939 	if (!devr->p0)
4940 		return -ENOMEM;
4941 
4942 	devr->p0->device  = ibdev;
4943 	devr->p0->uobject = NULL;
4944 	atomic_set(&devr->p0->usecnt, 0);
4945 
4946 	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
4947 	if (ret)
4948 		goto error0;
4949 
4950 	devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
4951 	if (!devr->c0) {
4952 		ret = -ENOMEM;
4953 		goto error1;
4954 	}
4955 
4956 	devr->c0->device = &dev->ib_dev;
4957 	atomic_set(&devr->c0->usecnt, 0);
4958 
4959 	ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
4960 	if (ret)
4961 		goto err_create_cq;
4962 
4963 	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4964 	if (IS_ERR(devr->x0)) {
4965 		ret = PTR_ERR(devr->x0);
4966 		goto error2;
4967 	}
4968 	devr->x0->device = &dev->ib_dev;
4969 	devr->x0->inode = NULL;
4970 	atomic_set(&devr->x0->usecnt, 0);
4971 	mutex_init(&devr->x0->tgt_qp_mutex);
4972 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4973 
4974 	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4975 	if (IS_ERR(devr->x1)) {
4976 		ret = PTR_ERR(devr->x1);
4977 		goto error3;
4978 	}
4979 	devr->x1->device = &dev->ib_dev;
4980 	devr->x1->inode = NULL;
4981 	atomic_set(&devr->x1->usecnt, 0);
4982 	mutex_init(&devr->x1->tgt_qp_mutex);
4983 	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4984 
4985 	memset(&attr, 0, sizeof(attr));
4986 	attr.attr.max_sge = 1;
4987 	attr.attr.max_wr = 1;
4988 	attr.srq_type = IB_SRQT_XRC;
4989 	attr.ext.cq = devr->c0;
4990 	attr.ext.xrc.xrcd = devr->x0;
4991 
4992 	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
4993 	if (!devr->s0) {
4994 		ret = -ENOMEM;
4995 		goto error4;
4996 	}
4997 
4998 	devr->s0->device	= &dev->ib_dev;
4999 	devr->s0->pd		= devr->p0;
5000 	devr->s0->srq_type      = IB_SRQT_XRC;
5001 	devr->s0->ext.xrc.xrcd	= devr->x0;
5002 	devr->s0->ext.cq	= devr->c0;
5003 	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
5004 	if (ret)
5005 		goto err_create;
5006 
5007 	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
5008 	atomic_inc(&devr->s0->ext.cq->usecnt);
5009 	atomic_inc(&devr->p0->usecnt);
5010 	atomic_set(&devr->s0->usecnt, 0);
5011 
5012 	memset(&attr, 0, sizeof(attr));
5013 	attr.attr.max_sge = 1;
5014 	attr.attr.max_wr = 1;
5015 	attr.srq_type = IB_SRQT_BASIC;
5016 	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5017 	if (!devr->s1) {
5018 		ret = -ENOMEM;
5019 		goto error5;
5020 	}
5021 
5022 	devr->s1->device	= &dev->ib_dev;
5023 	devr->s1->pd		= devr->p0;
5024 	devr->s1->srq_type      = IB_SRQT_BASIC;
5025 	devr->s1->ext.cq	= devr->c0;
5026 
5027 	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
5028 	if (ret)
5029 		goto error6;
5030 
5031 	atomic_inc(&devr->p0->usecnt);
5032 	atomic_set(&devr->s1->usecnt, 0);
5033 
5034 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
5035 		INIT_WORK(&devr->ports[port].pkey_change_work,
5036 			  pkey_change_handler);
5037 		devr->ports[port].devr = devr;
5038 	}
5039 
5040 	return 0;
5041 
5042 error6:
5043 	kfree(devr->s1);
5044 error5:
5045 	mlx5_ib_destroy_srq(devr->s0, NULL);
5046 err_create:
5047 	kfree(devr->s0);
5048 error4:
5049 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5050 error3:
5051 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5052 error2:
5053 	mlx5_ib_destroy_cq(devr->c0, NULL);
5054 err_create_cq:
5055 	kfree(devr->c0);
5056 error1:
5057 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5058 error0:
5059 	kfree(devr->p0);
5060 	return ret;
5061 }
5062 
5063 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
5064 {
5065 	int port;
5066 
5067 	mlx5_ib_destroy_srq(devr->s1, NULL);
5068 	kfree(devr->s1);
5069 	mlx5_ib_destroy_srq(devr->s0, NULL);
5070 	kfree(devr->s0);
5071 	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5072 	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5073 	mlx5_ib_destroy_cq(devr->c0, NULL);
5074 	kfree(devr->c0);
5075 	mlx5_ib_dealloc_pd(devr->p0, NULL);
5076 	kfree(devr->p0);
5077 
5078 	/* Make sure no P_Key change work items are still executing */
5079 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5080 		cancel_work_sync(&devr->ports[port].pkey_change_work);
5081 }
5082 
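/*
 * Derive the RDMA core port capabilities: IB link layer short-circuits to
 * IBA_IB, raw packet QPs are exposed only on non-multiport devices, and the
 * RoCE v1/v2 bits additionally require both IPv4 and IPv6 L3 support.
 */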
5083 static u32 get_core_cap_flags(struct ib_device *ibdev,
5084 			      struct mlx5_hca_vport_context *rep)
5085 {
5086 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5087 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
5088 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
5089 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
5090 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
5091 	u32 ret = 0;
5092 
5093 	if (rep->grh_required)
5094 		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
5095 
5096 	if (ll == IB_LINK_LAYER_INFINIBAND)
5097 		return ret | RDMA_CORE_PORT_IBA_IB;
5098 
5099 	if (raw_support)
5100 		ret |= RDMA_CORE_PORT_RAW_PACKET;
5101 
5102 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
5103 		return ret;
5104 
5105 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
5106 		return ret;
5107 
5108 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
5109 		ret |= RDMA_CORE_PORT_IBA_ROCE;
5110 
5111 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
5112 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
5113 
5114 	return ret;
5115 }
5116 
5117 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
5118 			       struct ib_port_immutable *immutable)
5119 {
5120 	struct ib_port_attr attr;
5121 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5122 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
5123 	struct mlx5_hca_vport_context rep = {0};
5124 	int err;
5125 
5126 	err = ib_query_port(ibdev, port_num, &attr);
5127 	if (err)
5128 		return err;
5129 
5130 	if (ll == IB_LINK_LAYER_INFINIBAND) {
5131 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
5132 						   &rep);
5133 		if (err)
5134 			return err;
5135 	}
5136 
5137 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5138 	immutable->gid_tbl_len = attr.gid_tbl_len;
5139 	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
5140 	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
5141 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
5142 
5143 	return 0;
5144 }
5145 
5146 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
5147 				   struct ib_port_immutable *immutable)
5148 {
5149 	struct ib_port_attr attr;
5150 	int err;
5151 
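	/* Must be set before ib_query_port(): the core derives the link layer
	 * from core_cap_flags while the immutable data is still being built.
	 */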
5152 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5153 
5154 	err = ib_query_port(ibdev, port_num, &attr);
5155 	if (err)
5156 		return err;
5157 
5158 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
5159 	immutable->gid_tbl_len = attr.gid_tbl_len;
5160 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5161 
5162 	return 0;
5163 }
5164 
5165 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
5166 {
5167 	struct mlx5_ib_dev *dev =
5168 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
5169 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
5170 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
5171 		 fw_rev_sub(dev->mdev));
5172 }
5173 
5174 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
5175 {
5176 	struct mlx5_core_dev *mdev = dev->mdev;
5177 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
5178 								 MLX5_FLOW_NAMESPACE_LAG);
5179 	struct mlx5_flow_table *ft;
5180 	int err;
5181 
5182 	if (!ns || !mlx5_lag_is_roce(mdev))
5183 		return 0;
5184 
5185 	err = mlx5_cmd_create_vport_lag(mdev);
5186 	if (err)
5187 		return err;
5188 
5189 	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
5190 	if (IS_ERR(ft)) {
5191 		err = PTR_ERR(ft);
5192 		goto err_destroy_vport_lag;
5193 	}
5194 
5195 	dev->flow_db->lag_demux_ft = ft;
5196 	dev->lag_active = true;
5197 	return 0;
5198 
5199 err_destroy_vport_lag:
5200 	mlx5_cmd_destroy_vport_lag(mdev);
5201 	return err;
5202 }
5203 
5204 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
5205 {
5206 	struct mlx5_core_dev *mdev = dev->mdev;
5207 
5208 	if (dev->lag_active) {
5209 		dev->lag_active = false;
5210 
5211 		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5212 		dev->flow_db->lag_demux_ft = NULL;
5213 
5214 		mlx5_cmd_destroy_vport_lag(mdev);
5215 	}
5216 }
5217 
5218 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5219 {
5220 	int err;
5221 
5222 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
5223 	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
5224 	if (err) {
5225 		dev->port[port_num].roce.nb.notifier_call = NULL;
5226 		return err;
5227 	}
5228 
5229 	return 0;
5230 }
5231 
5232 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5233 {
5234 	if (dev->port[port_num].roce.nb.notifier_call) {
5235 		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
5236 		dev->port[port_num].roce.nb.notifier_call = NULL;
5237 	}
5238 }
5239 
5240 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
5241 {
5242 	int err;
5243 
5244 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
5245 		err = mlx5_nic_vport_enable_roce(dev->mdev);
5246 		if (err)
5247 			return err;
5248 	}
5249 
5250 	err = mlx5_eth_lag_init(dev);
5251 	if (err)
5252 		goto err_disable_roce;
5253 
5254 	return 0;
5255 
5256 err_disable_roce:
5257 	if (MLX5_CAP_GEN(dev->mdev, roce))
5258 		mlx5_nic_vport_disable_roce(dev->mdev);
5259 
5260 	return err;
5261 }
5262 
5263 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
5264 {
5265 	mlx5_eth_lag_cleanup(dev);
5266 	if (MLX5_CAP_GEN(dev->mdev, roce))
5267 		mlx5_nic_vport_disable_roce(dev->mdev);
5268 }
5269 
5270 struct mlx5_ib_counter {
5271 	const char *name;
5272 	size_t offset;
5273 };
5274 
5275 #define INIT_Q_COUNTER(_name)		\
5276 	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
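/*
 * e.g. INIT_Q_COUNTER(out_of_buffer) expands to
 *	{ .name = "out_of_buffer",
 *	  .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer) }
 */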
5277 
5278 static const struct mlx5_ib_counter basic_q_cnts[] = {
5279 	INIT_Q_COUNTER(rx_write_requests),
5280 	INIT_Q_COUNTER(rx_read_requests),
5281 	INIT_Q_COUNTER(rx_atomic_requests),
5282 	INIT_Q_COUNTER(out_of_buffer),
5283 };
5284 
5285 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5286 	INIT_Q_COUNTER(out_of_sequence),
5287 };
5288 
5289 static const struct mlx5_ib_counter retrans_q_cnts[] = {
5290 	INIT_Q_COUNTER(duplicate_request),
5291 	INIT_Q_COUNTER(rnr_nak_retry_err),
5292 	INIT_Q_COUNTER(packet_seq_err),
5293 	INIT_Q_COUNTER(implied_nak_seq_err),
5294 	INIT_Q_COUNTER(local_ack_timeout_err),
5295 };
5296 
5297 #define INIT_CONG_COUNTER(_name)		\
5298 	{ .name = #_name, .offset =	\
5299 		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
5300 
5301 static const struct mlx5_ib_counter cong_cnts[] = {
5302 	INIT_CONG_COUNTER(rp_cnp_ignored),
5303 	INIT_CONG_COUNTER(rp_cnp_handled),
5304 	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5305 	INIT_CONG_COUNTER(np_cnp_sent),
5306 };
5307 
5308 static const struct mlx5_ib_counter extended_err_cnts[] = {
5309 	INIT_Q_COUNTER(resp_local_length_error),
5310 	INIT_Q_COUNTER(resp_cqe_error),
5311 	INIT_Q_COUNTER(req_cqe_error),
5312 	INIT_Q_COUNTER(req_remote_invalid_request),
5313 	INIT_Q_COUNTER(req_remote_access_errors),
5314 	INIT_Q_COUNTER(resp_remote_access_errors),
5315 	INIT_Q_COUNTER(resp_cqe_flush_error),
5316 	INIT_Q_COUNTER(req_cqe_flush_error),
5317 };
5318 
5319 #define INIT_EXT_PPCNT_COUNTER(_name)		\
5320 	{ .name = #_name, .offset =	\
5321 	MLX5_BYTE_OFF(ppcnt_reg, \
5322 		      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5323 
5324 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5325 	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5326 };
5327 
5328 static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
5329 {
5330 	return MLX5_ESWITCH_MANAGER(mdev) &&
5331 	       mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
5332 		       MLX5_ESWITCH_OFFLOADS;
5333 }
5334 
5335 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
5336 {
5337 	int num_cnt_ports;
5338 	int i;
5339 
5340 	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
5341 
5342 	for (i = 0; i < num_cnt_ports; i++) {
5343 		if (dev->port[i].cnts.set_id_valid)
5344 			mlx5_core_dealloc_q_counter(dev->mdev,
5345 						    dev->port[i].cnts.set_id);
5346 		kfree(dev->port[i].cnts.names);
5347 		kfree(dev->port[i].cnts.offsets);
5348 	}
5349 }
5350 
5351 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5352 				    struct mlx5_ib_counters *cnts)
5353 {
5354 	u32 num_counters;
5355 
5356 	num_counters = ARRAY_SIZE(basic_q_cnts);
5357 
5358 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5359 		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5360 
5361 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5362 		num_counters += ARRAY_SIZE(retrans_q_cnts);
5363 
5364 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5365 		num_counters += ARRAY_SIZE(extended_err_cnts);
5366 
5367 	cnts->num_q_counters = num_counters;
5368 
5369 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5370 		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5371 		num_counters += ARRAY_SIZE(cong_cnts);
5372 	}
5373 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5374 		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5375 		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5376 	}
5377 	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
5378 	if (!cnts->names)
5379 		return -ENOMEM;
5380 
5381 	cnts->offsets = kcalloc(num_counters,
5382 				sizeof(*cnts->offsets), GFP_KERNEL);
5383 	if (!cnts->offsets)
5384 		goto err_names;
5385 
5386 	return 0;
5387 
5388 err_names:
5389 	kfree(cnts->names);
5390 	cnts->names = NULL;
5391 	return -ENOMEM;
5392 }
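
/*
 * Editor's note: names[] and offsets[] are parallel arrays sized by the
 * capability checks above; mlx5_ib_fill_counters() below must apply the
 * exact same MLX5_CAP_* tests in the same order, otherwise the arrays
 * would be over- or under-filled relative to num_counters.
 */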
5393 
5394 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5395 				  const char **names,
5396 				  size_t *offsets)
5397 {
5398 	int i;
5399 	int j = 0;
5400 
5401 	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5402 		names[j] = basic_q_cnts[i].name;
5403 		offsets[j] = basic_q_cnts[i].offset;
5404 	}
5405 
5406 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5407 		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5408 			names[j] = out_of_seq_q_cnts[i].name;
5409 			offsets[j] = out_of_seq_q_cnts[i].offset;
5410 		}
5411 	}
5412 
5413 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5414 		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5415 			names[j] = retrans_q_cnts[i].name;
5416 			offsets[j] = retrans_q_cnts[i].offset;
5417 		}
5418 	}
5419 
5420 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5421 		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5422 			names[j] = extended_err_cnts[i].name;
5423 			offsets[j] = extended_err_cnts[i].offset;
5424 		}
5425 	}
5426 
5427 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5428 		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5429 			names[j] = cong_cnts[i].name;
5430 			offsets[j] = cong_cnts[i].offset;
5431 		}
5432 	}
5433 
5434 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5435 		for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5436 			names[j] = ext_ppcnt_cnts[i].name;
5437 			offsets[j] = ext_ppcnt_cnts[i].offset;
5438 		}
5439 	}
5440 }
5441 
5442 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
5443 {
5444 	int num_cnt_ports;
5445 	int err = 0;
5446 	int i;
5447 	bool is_shared;
5448 
5449 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
5450 	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
5451 
5452 	for (i = 0; i < num_cnt_ports; i++) {
5453 		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5454 		if (err)
5455 			goto err_alloc;
5456 
5457 		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5458 				      dev->port[i].cnts.offsets);
5459 
5460 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
5461 					       &dev->port[i].cnts.set_id,
5462 					       is_shared ?
5463 					       MLX5_SHARED_RESOURCE_UID : 0);
5464 		if (err) {
5465 			mlx5_ib_warn(dev,
5466 				     "couldn't allocate queue counter for port %d, err %d\n",
5467 				     i + 1, err);
5468 			goto err_alloc;
5469 		}
5470 		dev->port[i].cnts.set_id_valid = true;
5471 	}
5472 	return 0;
5473 
5474 err_alloc:
5475 	mlx5_ib_dealloc_counters(dev);
5476 	return err;
5477 }
5478 
5479 static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
5480 						   u8 port_num)
5481 {
5482 	return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
5483 						   &dev->port[port_num].cnts;
5484 }
5485 
5486 /**
5487  * mlx5_ib_get_counters_id - Returns counters id to use for device+port
5488  * @dev:	Pointer to mlx5 IB device
5489  * @port_num:	Zero based port number
5490  *
5491  * mlx5_ib_get_counters_id() returns the counter set id to use for the
5492  * given device and port combination, in both switchdev and non-switchdev
5493  * modes of the parent device.
5494  */
5495 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
5496 {
5497 	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
5498 
5499 	return cnts->set_id;
5500 }
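
/*
 * Editor's sketch (not part of the driver): a typical consumer wants
 * the counter set id when building a QP context, to bind the QP's
 * statistics to the per-port set. The function name and the "qpc"
 * mailbox pointer below are illustrative only.
 */
#if 0
static void example_bind_qp_counter(struct mlx5_ib_dev *dev, void *qpc,
				    u8 port_num)
{
	/* port_num is 1-based here; the helper wants it zero-based */
	u16 set_id = mlx5_ib_get_counters_id(dev, port_num - 1);

	MLX5_SET(qpc, qpc, counter_set_id, set_id);
}
#endif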
5501 
5502 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5503 						    u8 port_num)
5504 {
5505 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5506 	const struct mlx5_ib_counters *cnts;
5507 	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
5508 
5509 	if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
5510 		return NULL;
5511 
5512 	cnts = get_counters(dev, port_num - 1);
5513 
5514 	return rdma_alloc_hw_stats_struct(cnts->names,
5515 					  cnts->num_q_counters +
5516 					  cnts->num_cong_counters +
5517 					  cnts->num_ext_ppcnt_counters,
5518 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
5519 }
5520 
5521 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5522 				    const struct mlx5_ib_counters *cnts,
5523 				    struct rdma_hw_stats *stats,
5524 				    u16 set_id)
5525 {
5526 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5527 	void *out;
5528 	__be32 val;
5529 	int ret, i;
5530 
5531 	out = kvzalloc(outlen, GFP_KERNEL);
5532 	if (!out)
5533 		return -ENOMEM;
5534 
5535 	ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
5536 	if (ret)
5537 		goto free;
5538 
5539 	for (i = 0; i < cnts->num_q_counters; i++) {
5540 		val = *(__be32 *)(out + cnts->offsets[i]);
5541 		stats->value[i] = (u64)be32_to_cpu(val);
5542 	}
5543 
5544 free:
5545 	kvfree(out);
5546 	return ret;
5547 }
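
/*
 * Editor's note: Q counters are 32-bit big-endian words in the
 * QUERY_Q_COUNTER mailbox and are widened to u64 here, since
 * rdma_hw_stats only exposes 64-bit values. The congestion and
 * extended PPCNT counters handled below are read as 64-bit quantities
 * starting at their "_high" word.
 */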
5548 
5549 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5550 					    const struct mlx5_ib_counters *cnts,
5551 					    struct rdma_hw_stats *stats)
5552 {
5553 	int offset = cnts->num_q_counters + cnts->num_cong_counters;
5554 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5555 	int ret, i;
5556 	void *out;
5557 
5558 	out = kvzalloc(sz, GFP_KERNEL);
5559 	if (!out)
5560 		return -ENOMEM;
5561 
5562 	ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5563 	if (ret)
5564 		goto free;
5565 
5566 	for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
5567 		stats->value[i + offset] =
5568 			be64_to_cpup((__be64 *)(out +
5569 				    cnts->offsets[i + offset]));
5570 free:
5571 	kvfree(out);
5572 	return ret;
5573 }
5574 
5575 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5576 				struct rdma_hw_stats *stats,
5577 				u8 port_num, int index)
5578 {
5579 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5580 	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
5581 	struct mlx5_core_dev *mdev;
5582 	int ret, num_counters;
5583 	u8 mdev_port_num;
5584 
5585 	if (!stats)
5586 		return -EINVAL;
5587 
5588 	num_counters = cnts->num_q_counters +
5589 		       cnts->num_cong_counters +
5590 		       cnts->num_ext_ppcnt_counters;
5591 
5592 	/* q_counters are per IB device, query the master mdev */
5593 	ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
5594 	if (ret)
5595 		return ret;
5596 
5597 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5598 		ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
5599 		if (ret)
5600 			return ret;
5601 	}
5602 
5603 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5604 		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5605 						    &mdev_port_num);
5606 		if (!mdev) {
5607 			/* If port is not affiliated yet, its in down state
5608 			 * which doesn't have any counters yet, so it would be
5609 			 * zero. So no need to read from the HCA.
5610 			 */
5611 			goto done;
5612 		}
5613 		ret = mlx5_lag_query_cong_counters(dev->mdev,
5614 						   stats->value +
5615 						   cnts->num_q_counters,
5616 						   cnts->num_cong_counters,
5617 						   cnts->offsets +
5618 						   cnts->num_q_counters);
5619 
5620 		mlx5_ib_put_native_port_mdev(dev, port_num);
5621 		if (ret)
5622 			return ret;
5623 	}
5624 
5625 done:
5626 	return num_counters;
5627 }
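
/*
 * Editor's note: the stats->value layout mirrors the allocation order
 * in mlx5_ib_alloc_hw_stats():
 *
 *	[0, num_q_counters)			Q counters (master mdev)
 *	[num_q_counters, +num_cong_counters)	congestion counters
 *	[..., +num_ext_ppcnt_counters)		extended PPCNT counters
 *
 * so each query helper writes at a fixed offset instead of appending.
 */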
5628 
5629 static struct rdma_hw_stats *
5630 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
5631 {
5632 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5633 	const struct mlx5_ib_counters *cnts =
5634 		get_counters(dev, counter->port - 1);
5635 
5636 	/* Q counters sit at the beginning of the counters buffer */
5637 	return rdma_alloc_hw_stats_struct(cnts->names,
5638 					  cnts->num_q_counters,
5639 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
5640 }
5641 
5642 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
5643 {
5644 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5645 	const struct mlx5_ib_counters *cnts =
5646 		get_counters(dev, counter->port - 1);
5647 
5648 	return mlx5_ib_query_q_counters(dev->mdev, cnts,
5649 					counter->stats, counter->id);
5650 }
5651 
5652 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
5653 				   struct ib_qp *qp)
5654 {
5655 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
5656 	u16 cnt_set_id = 0;
5657 	int err;
5658 
5659 	if (!counter->id) {
5660 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
5661 					       &cnt_set_id,
5662 					       MLX5_SHARED_RESOURCE_UID);
5663 		if (err)
5664 			return err;
5665 		counter->id = cnt_set_id;
5666 	}
5667 
5668 	err = mlx5_ib_qp_set_counter(qp, counter);
5669 	if (err)
5670 		goto fail_set_counter;
5671 
5672 	return 0;
5673 
5674 fail_set_counter:
5675 	mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
5676 	counter->id = 0;
5677 
5678 	return err;
5679 }
5680 
5681 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
5682 {
5683 	return mlx5_ib_qp_set_counter(qp, NULL);
5684 }
5685 
5686 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
5687 {
5688 	struct mlx5_ib_dev *dev = to_mdev(counter->device);
5689 
5690 	return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
5691 }
5692 
5693 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5694 				 enum rdma_netdev_t type,
5695 				 struct rdma_netdev_alloc_params *params)
5696 {
5697 	if (type != RDMA_NETDEV_IPOIB)
5698 		return -EOPNOTSUPP;
5699 
5700 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5701 }
5702 
5703 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5704 {
5705 	if (!dev->delay_drop.dbg)
5706 		return;
5707 	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5708 	kfree(dev->delay_drop.dbg);
5709 	dev->delay_drop.dbg = NULL;
5710 }
5711 
5712 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5713 {
5714 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5715 		return;
5716 
5717 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
5718 	delay_drop_debugfs_cleanup(dev);
5719 }
5720 
5721 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5722 				       size_t count, loff_t *pos)
5723 {
5724 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5725 	char lbuf[20];
5726 	int len;
5727 
5728 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5729 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
5730 }
5731 
5732 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5733 					size_t count, loff_t *pos)
5734 {
5735 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5736 	u32 timeout;
5737 	u32 var;
5738 
5739 	if (kstrtouint_from_user(buf, count, 0, &var))
5740 		return -EFAULT;
5741 
5742 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5743 			1000);
5744 	if (timeout != var)
5745 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5746 			    timeout);
5747 
5748 	delay_drop->timeout = timeout;
5749 
5750 	return count;
5751 }
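
/*
 * Editor's note: the timeout is kept in microseconds, rounded up to a
 * 100 usec granularity and clamped at MLX5_MAX_DELAY_DROP_TIMEOUT_MS
 * milliseconds. Writing "1234", for instance, stores
 * roundup(1234, 100) = 1300 usec; anything above the cap is silently
 * reduced to MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000.
 */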
5752 
5753 static const struct file_operations fops_delay_drop_timeout = {
5754 	.owner	= THIS_MODULE,
5755 	.open	= simple_open,
5756 	.write	= delay_drop_timeout_write,
5757 	.read	= delay_drop_timeout_read,
5758 };
5759 
5760 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5761 {
5762 	struct mlx5_ib_dbg_delay_drop *dbg;
5763 
5764 	if (!mlx5_debugfs_root)
5765 		return 0;
5766 
5767 	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5768 	if (!dbg)
5769 		return -ENOMEM;
5770 
5771 	dev->delay_drop.dbg = dbg;
5772 
5773 	dbg->dir_debugfs =
5774 		debugfs_create_dir("delay_drop",
5775 				   dev->mdev->priv.dbg_root);
5776 	if (!dbg->dir_debugfs)
5777 		goto out_debugfs;
5778 
5779 	dbg->events_cnt_debugfs =
5780 		debugfs_create_atomic_t("num_timeout_events", 0400,
5781 					dbg->dir_debugfs,
5782 					&dev->delay_drop.events_cnt);
5783 	if (!dbg->events_cnt_debugfs)
5784 		goto out_debugfs;
5785 
5786 	dbg->rqs_cnt_debugfs =
5787 		debugfs_create_atomic_t("num_rqs", 0400,
5788 					dbg->dir_debugfs,
5789 					&dev->delay_drop.rqs_cnt);
5790 	if (!dbg->rqs_cnt_debugfs)
5791 		goto out_debugfs;
5792 
5793 	dbg->timeout_debugfs =
5794 		debugfs_create_file("timeout", 0600,
5795 				    dbg->dir_debugfs,
5796 				    &dev->delay_drop,
5797 				    &fops_delay_drop_timeout);
5798 	if (!dbg->timeout_debugfs)
5799 		goto out_debugfs;
5800 
5801 	return 0;
5802 
5803 out_debugfs:
5804 	delay_drop_debugfs_cleanup(dev);
5805 	return -ENOMEM;
5806 }
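
/*
 * Editor's note: every entry is created under the single "delay_drop"
 * directory, so the shared out_debugfs error path can undo any partial
 * failure with one recursive remove in delay_drop_debugfs_cleanup().
 */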
5807 
5808 static void init_delay_drop(struct mlx5_ib_dev *dev)
5809 {
5810 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5811 		return;
5812 
5813 	mutex_init(&dev->delay_drop.lock);
5814 	dev->delay_drop.dev = dev;
5815 	dev->delay_drop.activate = false;
5816 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5817 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5818 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
5819 	atomic_set(&dev->delay_drop.events_cnt, 0);
5820 
5821 	if (delay_drop_debugfs_init(dev))
5822 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
5823 }
5824 
5825 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5826 				      struct mlx5_ib_multiport_info *mpi)
5827 {
5828 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5829 	struct mlx5_ib_port *port = &ibdev->port[port_num];
5830 	int comps;
5831 	int err;
5832 	int i;
5833 
5834 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
5835 
5836 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5837 
5838 	spin_lock(&port->mp.mpi_lock);
5839 	if (!mpi->ibdev) {
5840 		spin_unlock(&port->mp.mpi_lock);
5841 		return;
5842 	}
5843 
5844 	mpi->ibdev = NULL;
5845 
5846 	spin_unlock(&port->mp.mpi_lock);
5847 	if (mpi->mdev_events.notifier_call)
5848 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5849 	mpi->mdev_events.notifier_call = NULL;
5850 	mlx5_remove_netdev_notifier(ibdev, port_num);
5851 	spin_lock(&port->mp.mpi_lock);
5852 
5853 	comps = mpi->mdev_refcnt;
5854 	if (comps) {
5855 		mpi->unaffiliate = true;
5856 		init_completion(&mpi->unref_comp);
5857 		spin_unlock(&port->mp.mpi_lock);
5858 
5859 		for (i = 0; i < comps; i++)
5860 			wait_for_completion(&mpi->unref_comp);
5861 
5862 		spin_lock(&port->mp.mpi_lock);
5863 		mpi->unaffiliate = false;
5864 	}
5865 
5866 	port->mp.mpi = NULL;
5867 
5868 	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5869 
5870 	spin_unlock(&port->mp.mpi_lock);
5871 
5872 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5873 
5874 	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5875 	/* On failure only log an error; the pointers still need cleaning
5876 	 * up and the mpi must go back on the unaffiliated list.
5877 	 */
5878 	if (err)
5879 		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5880 			    port_num + 1);
5881 
5882 	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
5883 }
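
/*
 * Editor's note: unaffiliation is a hand-rolled handshake rather than a
 * refcount API: mdev_refcnt counts outstanding users of the slave mdev,
 * and with "unaffiliate" set each user signals unref_comp as it drops
 * its reference. Waiting for exactly "comps" completions outside the
 * lock is safe because new users cannot appear once mpi->ibdev has been
 * cleared under mpi_lock.
 */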
5884 
5885 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5886 				    struct mlx5_ib_multiport_info *mpi)
5887 {
5888 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5889 	int err;
5890 
5891 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
5892 
5893 	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5894 	if (ibdev->port[port_num].mp.mpi) {
5895 		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5896 			    port_num + 1);
5897 		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5898 		return false;
5899 	}
5900 
5901 	ibdev->port[port_num].mp.mpi = mpi;
5902 	mpi->ibdev = ibdev;
5903 	mpi->mdev_events.notifier_call = NULL;
5904 	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5905 
5906 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5907 	if (err)
5908 		goto unbind;
5909 
5910 	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5911 	if (err)
5912 		goto unbind;
5913 
5914 	err = mlx5_add_netdev_notifier(ibdev, port_num);
5915 	if (err) {
5916 		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5917 			    port_num + 1);
5918 		goto unbind;
5919 	}
5920 
5921 	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5922 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5923 
5924 	mlx5_ib_init_cong_debugfs(ibdev, port_num);
5925 
5926 	return true;
5927 
5928 unbind:
5929 	mlx5_ib_unbind_slave_port(ibdev, mpi);
5930 	return false;
5931 }
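
/*
 * Editor's note: every failure after the mpi has been published funnels
 * through mlx5_ib_unbind_slave_port(), which tolerates the partially
 * initialized state (NULL notifier_call, zero mdev_refcnt), keeping
 * bind/unbind the only two paths that touch port->mp.
 */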
5932 
5933 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5934 {
5935 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5936 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5937 							  port_num + 1);
5938 	struct mlx5_ib_multiport_info *mpi;
5939 	int err;
5940 	int i;
5941 
5942 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5943 		return 0;
5944 
5945 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5946 						     &dev->sys_image_guid);
5947 	if (err)
5948 		return err;
5949 
5950 	err = mlx5_nic_vport_enable_roce(dev->mdev);
5951 	if (err)
5952 		return err;
5953 
5954 	mutex_lock(&mlx5_ib_multiport_mutex);
5955 	for (i = 0; i < dev->num_ports; i++) {
5956 		bool bound = false;
5957 
5958 		/* build a stub multiport info struct for the native port. */
5959 		if (i == port_num) {
5960 			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5961 			if (!mpi) {
5962 				mutex_unlock(&mlx5_ib_multiport_mutex);
5963 				mlx5_nic_vport_disable_roce(dev->mdev);
5964 				return -ENOMEM;
5965 			}
5966 
5967 			mpi->is_master = true;
5968 			mpi->mdev = dev->mdev;
5969 			mpi->sys_image_guid = dev->sys_image_guid;
5970 			dev->port[i].mp.mpi = mpi;
5971 			mpi->ibdev = dev;
5972 			mpi = NULL;
5973 			continue;
5974 		}
5975 
5976 		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5977 				    list) {
5978 			if (dev->sys_image_guid == mpi->sys_image_guid &&
5979 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5980 				bound = mlx5_ib_bind_slave_port(dev, mpi);
5981 			}
5982 
5983 			if (bound) {
5984 				dev_dbg(mpi->mdev->device,
5985 					"removing port from unaffiliated list.\n");
5986 				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5987 				list_del(&mpi->list);
5988 				break;
5989 			}
5990 		}
5991 		if (!bound) {
5992 			get_port_caps(dev, i + 1);
5993 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
5994 				    i + 1);
5995 		}
5996 	}
5997 
5998 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5999 	mutex_unlock(&mlx5_ib_multiport_mutex);
6000 	return err;
6001 }
6002 
6003 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
6004 {
6005 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6006 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
6007 							  port_num + 1);
6008 	int i;
6009 
6010 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
6011 		return;
6012 
6013 	mutex_lock(&mlx5_ib_multiport_mutex);
6014 	for (i = 0; i < dev->num_ports; i++) {
6015 		if (dev->port[i].mp.mpi) {
6016 			/* Destroy the native port stub */
6017 			if (i == port_num) {
6018 				kfree(dev->port[i].mp.mpi);
6019 				dev->port[i].mp.mpi = NULL;
6020 			} else {
6021 				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
6022 				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
6023 			}
6024 		}
6025 	}
6026 
6027 	mlx5_ib_dbg(dev, "removing from devlist\n");
6028 	list_del(&dev->ib_dev_list);
6029 	mutex_unlock(&mlx5_ib_multiport_mutex);
6030 
6031 	mlx5_nic_vport_disable_roce(dev->mdev);
6032 }
6033 
6034 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6035 	mlx5_ib_dm,
6036 	UVERBS_OBJECT_DM,
6037 	UVERBS_METHOD_DM_ALLOC,
6038 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
6039 			    UVERBS_ATTR_TYPE(u64),
6040 			    UA_MANDATORY),
6041 	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
6042 			    UVERBS_ATTR_TYPE(u16),
6043 			    UA_OPTIONAL),
6044 	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
6045 			     enum mlx5_ib_uapi_dm_type,
6046 			     UA_OPTIONAL));
6047 
6048 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6049 	mlx5_ib_flow_action,
6050 	UVERBS_OBJECT_FLOW_ACTION,
6051 	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
6052 	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
6053 			     enum mlx5_ib_uapi_flow_action_flags));
6054 
6055 static const struct uapi_definition mlx5_ib_defs[] = {
6056 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
6057 	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
6058 	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
6059 #endif
6060 
6061 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
6062 				&mlx5_ib_flow_action),
6063 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
6064 	{}
6065 };
6066 
6067 static int mlx5_ib_read_counters(struct ib_counters *counters,
6068 				 struct ib_counters_read_attr *read_attr,
6069 				 struct uverbs_attr_bundle *attrs)
6070 {
6071 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6072 	struct mlx5_read_counters_attr mread_attr = {};
6073 	struct mlx5_ib_flow_counters_desc *desc;
6074 	int ret, i;
6075 
6076 	mutex_lock(&mcounters->mcntrs_mutex);
6077 	if (mcounters->cntrs_max_index > read_attr->ncounters) {
6078 		ret = -EINVAL;
6079 		goto err_bound;
6080 	}
6081 
6082 	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
6083 				 GFP_KERNEL);
6084 	if (!mread_attr.out) {
6085 		ret = -ENOMEM;
6086 		goto err_bound;
6087 	}
6088 
6089 	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
6090 	mread_attr.flags = read_attr->flags;
6091 	ret = mcounters->read_counters(counters->device, &mread_attr);
6092 	if (ret)
6093 		goto err_read;
6094 
6095 	/* Walk the counters data array and scatter each hardware value into
6096 	 * the user buffer according to its description/index pair.
6097 	 */
6098 	desc = mcounters->counters_data;
6099 	for (i = 0; i < mcounters->ncounters; i++)
6100 		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
6101 
6102 err_read:
6103 	kfree(mread_attr.out);
6104 err_bound:
6105 	mutex_unlock(&mcounters->mcntrs_mutex);
6106 	return ret;
6107 }
6108 
6109 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
6110 {
6111 	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6112 
6113 	counters_clear_description(counters);
6114 	if (mcounters->hw_cntrs_hndl)
6115 		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
6116 				mcounters->hw_cntrs_hndl);
6117 
6118 	kfree(mcounters);
6119 
6120 	return 0;
6121 }
6122 
6123 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
6124 						   struct uverbs_attr_bundle *attrs)
6125 {
6126 	struct mlx5_ib_mcounters *mcounters;
6127 
6128 	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
6129 	if (!mcounters)
6130 		return ERR_PTR(-ENOMEM);
6131 
6132 	mutex_init(&mcounters->mcntrs_mutex);
6133 
6134 	return &mcounters->ibcntrs;
6135 }
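
/*
 * Editor's note: creation only sets up the software container; the
 * hardware flow counter (hw_cntrs_hndl) is attached later, when the
 * counters object is first bound to a flow. That is why
 * mlx5_ib_destroy_counters() checks the handle before destroying it.
 */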
6136 
6137 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
6138 {
6139 	struct mlx5_core_dev *mdev = dev->mdev;
6140 
6141 	mlx5_ib_cleanup_multiport_master(dev);
6142 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6143 		srcu_barrier(&dev->mr_srcu);
6144 		cleanup_srcu_struct(&dev->mr_srcu);
6145 	}
6146 
6147 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
6148 
6149 	WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
6150 		!bitmap_empty(
6151 			dev->dm.steering_sw_icm_alloc_blocks,
6152 			BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
6153 			    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
6154 
6155 	kfree(dev->dm.steering_sw_icm_alloc_blocks);
6156 
6157 	WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
6158 		!bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
6159 			      BIT(MLX5_CAP_DEV_MEM(
6160 					  mdev, log_header_modify_sw_icm_size) -
6161 				  MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
6162 
6163 	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
6164 }
6165 
6166 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6167 {
6168 	struct mlx5_core_dev *mdev = dev->mdev;
6169 	u64 header_modify_icm_blocks = 0;
6170 	u64 steering_icm_blocks = 0;
6171 	int err;
6172 	int i;
6173 
6174 	for (i = 0; i < dev->num_ports; i++) {
6175 		spin_lock_init(&dev->port[i].mp.mpi_lock);
6176 		rwlock_init(&dev->port[i].roce.netdev_lock);
6177 		dev->port[i].roce.dev = dev;
6178 		dev->port[i].roce.native_port_num = i + 1;
6179 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6180 	}
6181 
6182 	err = mlx5_ib_init_multiport_master(dev);
6183 	if (err)
6184 		return err;
6185 
6186 	err = set_has_smi_cap(dev);
6187 	if (err)
6188 		return err;
6189 
6190 	if (!mlx5_core_mp_enabled(mdev)) {
6191 		for (i = 1; i <= dev->num_ports; i++) {
6192 			err = get_port_caps(dev, i);
6193 			if (err)
6194 				break;
6195 		}
6196 	} else {
6197 		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
6198 	}
6199 	if (err)
6200 		goto err_mp;
6201 
6202 	if (mlx5_use_mad_ifc(dev))
6203 		get_ext_port_caps(dev);
6204 
6205 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
6206 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
6207 	dev->ib_dev.phys_port_cnt	= dev->num_ports;
6208 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
6209 	dev->ib_dev.dev.parent		= mdev->device;
6210 
6211 	mutex_init(&dev->cap_mask_mutex);
6212 	INIT_LIST_HEAD(&dev->qp_list);
6213 	spin_lock_init(&dev->reset_flow_resource_lock);
6214 
6215 	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
6216 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
6217 		if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
6218 			steering_icm_blocks =
6219 				BIT(MLX5_CAP_DEV_MEM(mdev,
6220 						     log_steering_sw_icm_size) -
6221 				    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
6222 
6223 			dev->dm.steering_sw_icm_alloc_blocks =
6224 				kcalloc(BITS_TO_LONGS(steering_icm_blocks),
6225 					sizeof(unsigned long), GFP_KERNEL);
6226 			if (!dev->dm.steering_sw_icm_alloc_blocks)
6227 				goto err_mp;
6228 		}
6229 
6230 		if (MLX5_CAP64_DEV_MEM(mdev,
6231 				       header_modify_sw_icm_start_address)) {
6232 			header_modify_icm_blocks = BIT(
6233 				MLX5_CAP_DEV_MEM(
6234 					mdev, log_header_modify_sw_icm_size) -
6235 				MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
6236 
6237 			dev->dm.header_modify_sw_icm_alloc_blocks =
6238 				kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
6239 					sizeof(unsigned long), GFP_KERNEL);
6240 			if (!dev->dm.header_modify_sw_icm_alloc_blocks)
6241 				goto err_dm;
6242 		}
6243 	}
6244 
6245 	spin_lock_init(&dev->dm.lock);
6246 	dev->dm.dev = mdev;
6247 
6248 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
6249 		err = init_srcu_struct(&dev->mr_srcu);
6250 		if (err)
6251 			goto err_dm;
6252 	}
6253 
6254 	return 0;
6255 
6256 err_dm:
6257 	kfree(dev->dm.steering_sw_icm_alloc_blocks);
6258 	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
6259 
6260 err_mp:
6261 	mlx5_ib_cleanup_multiport_master(dev);
6262 
6263 	return -ENOMEM;
6264 }
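
/*
 * Editor's note: the SW ICM allocator is a plain bitmap with one bit
 * per block, so the block count is BIT(log_area_size - log_block_size).
 * For example, a 1 GB steering area (log size 30) with 4 KB blocks
 * (log block size 12) needs BIT(30 - 12) = 262144 bits, i.e. 32 KB of
 * bitmap, which is what the kcalloc(BITS_TO_LONGS(...), ...) calls
 * above reserve.
 */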
6265 
6266 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
6267 {
6268 	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
6269 
6270 	if (!dev->flow_db)
6271 		return -ENOMEM;
6272 
6273 	mutex_init(&dev->flow_db->lock);
6274 
6275 	return 0;
6276 }
6277 
6278 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
6279 {
6280 	kfree(dev->flow_db);
6281 }
6282 
6283 static const struct ib_device_ops mlx5_ib_dev_ops = {
6284 	.owner = THIS_MODULE,
6285 	.driver_id = RDMA_DRIVER_MLX5,
6286 	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,
6287 
6288 	.add_gid = mlx5_ib_add_gid,
6289 	.alloc_mr = mlx5_ib_alloc_mr,
6290 	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
6291 	.alloc_pd = mlx5_ib_alloc_pd,
6292 	.alloc_ucontext = mlx5_ib_alloc_ucontext,
6293 	.attach_mcast = mlx5_ib_mcg_attach,
6294 	.check_mr_status = mlx5_ib_check_mr_status,
6295 	.create_ah = mlx5_ib_create_ah,
6296 	.create_counters = mlx5_ib_create_counters,
6297 	.create_cq = mlx5_ib_create_cq,
6298 	.create_flow = mlx5_ib_create_flow,
6299 	.create_qp = mlx5_ib_create_qp,
6300 	.create_srq = mlx5_ib_create_srq,
6301 	.dealloc_pd = mlx5_ib_dealloc_pd,
6302 	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
6303 	.del_gid = mlx5_ib_del_gid,
6304 	.dereg_mr = mlx5_ib_dereg_mr,
6305 	.destroy_ah = mlx5_ib_destroy_ah,
6306 	.destroy_counters = mlx5_ib_destroy_counters,
6307 	.destroy_cq = mlx5_ib_destroy_cq,
6308 	.destroy_flow = mlx5_ib_destroy_flow,
6309 	.destroy_flow_action = mlx5_ib_destroy_flow_action,
6310 	.destroy_qp = mlx5_ib_destroy_qp,
6311 	.destroy_srq = mlx5_ib_destroy_srq,
6312 	.detach_mcast = mlx5_ib_mcg_detach,
6313 	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
6314 	.drain_rq = mlx5_ib_drain_rq,
6315 	.drain_sq = mlx5_ib_drain_sq,
6316 	.get_dev_fw_str = get_dev_fw_str,
6317 	.get_dma_mr = mlx5_ib_get_dma_mr,
6318 	.get_link_layer = mlx5_ib_port_link_layer,
6319 	.map_mr_sg = mlx5_ib_map_mr_sg,
6320 	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
6321 	.mmap = mlx5_ib_mmap,
6322 	.modify_cq = mlx5_ib_modify_cq,
6323 	.modify_device = mlx5_ib_modify_device,
6324 	.modify_port = mlx5_ib_modify_port,
6325 	.modify_qp = mlx5_ib_modify_qp,
6326 	.modify_srq = mlx5_ib_modify_srq,
6327 	.poll_cq = mlx5_ib_poll_cq,
6328 	.post_recv = mlx5_ib_post_recv,
6329 	.post_send = mlx5_ib_post_send,
6330 	.post_srq_recv = mlx5_ib_post_srq_recv,
6331 	.process_mad = mlx5_ib_process_mad,
6332 	.query_ah = mlx5_ib_query_ah,
6333 	.query_device = mlx5_ib_query_device,
6334 	.query_gid = mlx5_ib_query_gid,
6335 	.query_pkey = mlx5_ib_query_pkey,
6336 	.query_qp = mlx5_ib_query_qp,
6337 	.query_srq = mlx5_ib_query_srq,
6338 	.read_counters = mlx5_ib_read_counters,
6339 	.reg_user_mr = mlx5_ib_reg_user_mr,
6340 	.req_notify_cq = mlx5_ib_arm_cq,
6341 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
6342 	.resize_cq = mlx5_ib_resize_cq,
6343 
6344 	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
6345 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
6346 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
6347 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
6348 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
6349 };
6350 
6351 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
6352 	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
6353 	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
6354 };
6355 
6356 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
6357 	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
6358 };
6359 
6360 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
6361 	.get_vf_config = mlx5_ib_get_vf_config,
6362 	.get_vf_stats = mlx5_ib_get_vf_stats,
6363 	.set_vf_guid = mlx5_ib_set_vf_guid,
6364 	.set_vf_link_state = mlx5_ib_set_vf_link_state,
6365 };
6366 
6367 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6368 	.alloc_mw = mlx5_ib_alloc_mw,
6369 	.dealloc_mw = mlx5_ib_dealloc_mw,
6370 };
6371 
6372 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6373 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
6374 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6375 };
6376 
6377 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6378 	.alloc_dm = mlx5_ib_alloc_dm,
6379 	.dealloc_dm = mlx5_ib_dealloc_dm,
6380 	.reg_dm_mr = mlx5_ib_reg_dm_mr,
6381 };
6382 
6383 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6384 {
6385 	struct mlx5_core_dev *mdev = dev->mdev;
6386 	int err;
6387 
6388 	dev->ib_dev.uverbs_cmd_mask	=
6389 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
6390 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
6391 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
6392 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
6393 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
6394 		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
6395 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
6396 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
6397 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
6398 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
6399 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
6400 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
6401 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
6402 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
6403 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
6404 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
6405 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
6406 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
6407 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
6408 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
6409 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
6410 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
6411 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
6412 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
6413 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
6414 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
6415 	dev->ib_dev.uverbs_ex_cmd_mask =
6416 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
6417 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
6418 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
6419 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)	|
6420 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)	|
6421 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)	|
6422 		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6423 
6424 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6425 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6426 		ib_set_device_ops(&dev->ib_dev,
6427 				  &mlx5_ib_dev_ipoib_enhanced_ops);
6428 
6429 	if (mlx5_core_is_pf(mdev))
6430 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
6431 
6432 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6433 
6434 	if (MLX5_CAP_GEN(mdev, imaicl)) {
6435 		dev->ib_dev.uverbs_cmd_mask |=
6436 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
6437 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
6438 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6439 	}
6440 
6441 	if (MLX5_CAP_GEN(mdev, xrc)) {
6442 		dev->ib_dev.uverbs_cmd_mask |=
6443 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6444 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
6445 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6446 	}
6447 
6448 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
6449 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6450 	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
6451 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
6452 
6453 	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6454 	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
6455 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
6456 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
6457 
6458 	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6459 		dev->ib_dev.driver_def = mlx5_ib_defs;
6460 
6461 	err = init_node_data(dev);
6462 	if (err)
6463 		return err;
6464 
6465 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6466 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6467 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
6468 		mutex_init(&dev->lb.mutex);
6469 
6470 	dev->ib_dev.use_cq_dim = true;
6471 
6472 	return 0;
6473 }
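
/*
 * Editor's note: uverbs_cmd_mask is a 64-bit bitmap indexed by the
 * IB_USER_VERBS_CMD_* opcodes, so optional verbs are advertised by
 * OR-ing in extra bits only when the matching capability test passes
 * (memory windows behind "imaicl", XRC domains behind "xrc", and so
 * on). Userspace consults the mask before issuing a command.
 */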
6474 
6475 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6476 	.get_port_immutable = mlx5_port_immutable,
6477 	.query_port = mlx5_ib_query_port,
6478 };
6479 
6480 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6481 {
6482 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6483 	return 0;
6484 }
6485 
6486 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6487 	.get_port_immutable = mlx5_port_rep_immutable,
6488 	.query_port = mlx5_ib_rep_query_port,
6489 };
6490 
6491 static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
6492 {
6493 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6494 	return 0;
6495 }
6496 
6497 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6498 	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6499 	.create_wq = mlx5_ib_create_wq,
6500 	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6501 	.destroy_wq = mlx5_ib_destroy_wq,
6502 	.get_netdev = mlx5_ib_get_netdev,
6503 	.modify_wq = mlx5_ib_modify_wq,
6504 };
6505 
6506 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
6507 {
6508 	u8 port_num;
6509 
6510 	dev->ib_dev.uverbs_ex_cmd_mask |=
6511 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6512 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6513 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6514 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6515 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
6516 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
6517 
6518 	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6519 
6520 	/* Register only for native ports */
6521 	return mlx5_add_netdev_notifier(dev, port_num);
6522 }
6523 
6524 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6525 {
6526 	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6527 
6528 	mlx5_remove_netdev_notifier(dev, port_num);
6529 }
6530 
6531 static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
6532 {
6533 	struct mlx5_core_dev *mdev = dev->mdev;
6534 	enum rdma_link_layer ll;
6535 	int port_type_cap;
6536 	int err = 0;
6537 
6538 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6539 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6540 
6541 	if (ll == IB_LINK_LAYER_ETHERNET)
6542 		err = mlx5_ib_stage_common_roce_init(dev);
6543 
6544 	return err;
6545 }
6546 
6547 static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
6548 {
6549 	mlx5_ib_stage_common_roce_cleanup(dev);
6550 }
6551 
6552 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6553 {
6554 	struct mlx5_core_dev *mdev = dev->mdev;
6555 	enum rdma_link_layer ll;
6556 	int port_type_cap;
6557 	int err;
6558 
6559 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6560 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6561 
6562 	if (ll == IB_LINK_LAYER_ETHERNET) {
6563 		err = mlx5_ib_stage_common_roce_init(dev);
6564 		if (err)
6565 			return err;
6566 
6567 		err = mlx5_enable_eth(dev);
6568 		if (err)
6569 			goto cleanup;
6570 	}
6571 
6572 	return 0;
6573 cleanup:
6574 	mlx5_ib_stage_common_roce_cleanup(dev);
6575 
6576 	return err;
6577 }
6578 
6579 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6580 {
6581 	struct mlx5_core_dev *mdev = dev->mdev;
6582 	enum rdma_link_layer ll;
6583 	int port_type_cap;
6584 
6585 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6586 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6587 
6588 	if (ll == IB_LINK_LAYER_ETHERNET) {
6589 		mlx5_disable_eth(dev);
6590 		mlx5_ib_stage_common_roce_cleanup(dev);
6591 	}
6592 }
6593 
6594 static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6595 {
6596 	return create_dev_resources(&dev->devr);
6597 }
6598 
6599 static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6600 {
6601 	destroy_dev_resources(&dev->devr);
6602 }
6603 
6604 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6605 {
6606 	mlx5_ib_internal_fill_odp_caps(dev);
6607 
6608 	return mlx5_ib_odp_init_one(dev);
6609 }
6610 
6611 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6612 {
6613 	mlx5_ib_odp_cleanup_one(dev);
6614 }
6615 
6616 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6617 	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6618 	.get_hw_stats = mlx5_ib_get_hw_stats,
6619 	.counter_bind_qp = mlx5_ib_counter_bind_qp,
6620 	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
6621 	.counter_dealloc = mlx5_ib_counter_dealloc,
6622 	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
6623 	.counter_update_stats = mlx5_ib_counter_update_stats,
6624 };
6625 
6626 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
6627 {
6628 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
6629 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
6630 
6631 		return mlx5_ib_alloc_counters(dev);
6632 	}
6633 
6634 	return 0;
6635 }
6636 
6637 static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
6638 {
6639 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6640 		mlx5_ib_dealloc_counters(dev);
6641 }
6642 
6643 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6644 {
6645 	mlx5_ib_init_cong_debugfs(dev,
6646 				  mlx5_core_native_port_num(dev->mdev) - 1);
6647 	return 0;
6648 }
6649 
6650 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6651 {
6652 	mlx5_ib_cleanup_cong_debugfs(dev,
6653 				     mlx5_core_native_port_num(dev->mdev) - 1);
6654 }
6655 
6656 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6657 {
6658 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
6659 	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
6660 }
6661 
6662 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6663 {
6664 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6665 }
6666 
6667 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
6668 {
6669 	int err;
6670 
6671 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6672 	if (err)
6673 		return err;
6674 
6675 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6676 	if (err)
6677 		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6678 
6679 	return err;
6680 }
6681 
6682 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
6683 {
6684 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6685 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6686 }
6687 
6688 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6689 {
6690 	const char *name;
6691 
6692 	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
6693 	if (!mlx5_lag_is_roce(dev->mdev))
6694 		name = "mlx5_%d";
6695 	else
6696 		name = "mlx5_bond_%d";
6697 	return ib_register_device(&dev->ib_dev, name);
6698 }
6699 
6700 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6701 {
6702 	destroy_umrc_res(dev);
6703 }
6704 
6705 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
6706 {
6707 	ib_unregister_device(&dev->ib_dev);
6708 }
6709 
6710 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
6711 {
6712 	return create_umr_res(dev);
6713 }
6714 
6715 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6716 {
6717 	init_delay_drop(dev);
6718 
6719 	return 0;
6720 }
6721 
6722 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6723 {
6724 	cancel_delay_drop(dev);
6725 }
6726 
6727 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6728 {
6729 	dev->mdev_events.notifier_call = mlx5_ib_event;
6730 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6731 	return 0;
6732 }
6733 
6734 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6735 {
6736 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6737 }
6738 
6739 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6740 {
6741 	int uid;
6742 
6743 	uid = mlx5_ib_devx_create(dev, false);
6744 	if (uid > 0) {
6745 		dev->devx_whitelist_uid = uid;
6746 		mlx5_ib_devx_init_event_table(dev);
6747 	}
6748 
6749 	return 0;
6750 }

6751 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6752 {
6753 	if (dev->devx_whitelist_uid) {
6754 		mlx5_ib_devx_cleanup_event_table(dev);
6755 		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6756 	}
6757 }
6758 
6759 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6760 		      const struct mlx5_ib_profile *profile,
6761 		      int stage)
6762 {
6763 	/* "stage" is the number of stages to unwind, newest first */
6764 	while (stage) {
6765 		stage--;
6766 		if (profile->stage[stage].cleanup)
6767 			profile->stage[stage].cleanup(dev);
6768 	}
6769 
6770 	kfree(dev->port);
6771 	ib_dealloc_device(&dev->ib_dev);
6772 }
6773 
6774 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6775 		    const struct mlx5_ib_profile *profile)
6776 {
6777 	int err;
6778 	int i;
6779 
6780 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6781 		if (profile->stage[i].init) {
6782 			err = profile->stage[i].init(dev);
6783 			if (err)
6784 				goto err_out;
6785 		}
6786 	}
6787 
6788 	dev->profile = profile;
6789 	dev->ib_active = true;
6790 
6791 	return dev;
6792 
6793 err_out:
6794 	__mlx5_ib_remove(dev, profile, i);
6795 
6796 	return NULL;
6797 }
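
/*
 * Editor's note: bring-up is table driven: the profiles below list
 * init/cleanup pairs in dependency order, and a failure at stage i
 * unwinds stages [0, i) in reverse through __mlx5_ib_remove(). Either
 * callback may be NULL, e.g. MLX5_IB_STAGE_PRE_IB_REG_UMR supplies a
 * cleanup only.
 */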
6798 
6799 static const struct mlx5_ib_profile pf_profile = {
6800 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6801 		     mlx5_ib_stage_init_init,
6802 		     mlx5_ib_stage_init_cleanup),
6803 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6804 		     mlx5_ib_stage_flow_db_init,
6805 		     mlx5_ib_stage_flow_db_cleanup),
6806 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6807 		     mlx5_ib_stage_caps_init,
6808 		     NULL),
6809 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6810 		     mlx5_ib_stage_non_default_cb,
6811 		     NULL),
6812 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6813 		     mlx5_ib_stage_roce_init,
6814 		     mlx5_ib_stage_roce_cleanup),
6815 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6816 		     mlx5_init_srq_table,
6817 		     mlx5_cleanup_srq_table),
6818 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6819 		     mlx5_ib_stage_dev_res_init,
6820 		     mlx5_ib_stage_dev_res_cleanup),
6821 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6822 		     mlx5_ib_stage_dev_notifier_init,
6823 		     mlx5_ib_stage_dev_notifier_cleanup),
6824 	STAGE_CREATE(MLX5_IB_STAGE_ODP,
6825 		     mlx5_ib_stage_odp_init,
6826 		     mlx5_ib_stage_odp_cleanup),
6827 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6828 		     mlx5_ib_stage_counters_init,
6829 		     mlx5_ib_stage_counters_cleanup),
6830 	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6831 		     mlx5_ib_stage_cong_debugfs_init,
6832 		     mlx5_ib_stage_cong_debugfs_cleanup),
6833 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6834 		     mlx5_ib_stage_uar_init,
6835 		     mlx5_ib_stage_uar_cleanup),
6836 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6837 		     mlx5_ib_stage_bfrag_init,
6838 		     mlx5_ib_stage_bfrag_cleanup),
6839 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6840 		     NULL,
6841 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6842 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6843 		     mlx5_ib_stage_devx_init,
6844 		     mlx5_ib_stage_devx_cleanup),
6845 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6846 		     mlx5_ib_stage_ib_reg_init,
6847 		     mlx5_ib_stage_ib_reg_cleanup),
6848 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6849 		     mlx5_ib_stage_post_ib_reg_umr_init,
6850 		     NULL),
6851 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6852 		     mlx5_ib_stage_delay_drop_init,
6853 		     mlx5_ib_stage_delay_drop_cleanup),
6854 };
6855 
6856 const struct mlx5_ib_profile uplink_rep_profile = {
6857 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
6858 		     mlx5_ib_stage_init_init,
6859 		     mlx5_ib_stage_init_cleanup),
6860 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6861 		     mlx5_ib_stage_flow_db_init,
6862 		     mlx5_ib_stage_flow_db_cleanup),
6863 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6864 		     mlx5_ib_stage_caps_init,
6865 		     NULL),
6866 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6867 		     mlx5_ib_stage_rep_non_default_cb,
6868 		     NULL),
6869 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6870 		     mlx5_ib_stage_rep_roce_init,
6871 		     mlx5_ib_stage_rep_roce_cleanup),
6872 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6873 		     mlx5_init_srq_table,
6874 		     mlx5_cleanup_srq_table),
6875 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6876 		     mlx5_ib_stage_dev_res_init,
6877 		     mlx5_ib_stage_dev_res_cleanup),
6878 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6879 		     mlx5_ib_stage_dev_notifier_init,
6880 		     mlx5_ib_stage_dev_notifier_cleanup),
6881 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6882 		     mlx5_ib_stage_counters_init,
6883 		     mlx5_ib_stage_counters_cleanup),
6884 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
6885 		     mlx5_ib_stage_uar_init,
6886 		     mlx5_ib_stage_uar_cleanup),
6887 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6888 		     mlx5_ib_stage_bfrag_init,
6889 		     mlx5_ib_stage_bfrag_cleanup),
6890 	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6891 		     NULL,
6892 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6893 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6894 		     mlx5_ib_stage_devx_init,
6895 		     mlx5_ib_stage_devx_cleanup),
6896 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6897 		     mlx5_ib_stage_ib_reg_init,
6898 		     mlx5_ib_stage_ib_reg_cleanup),
6899 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6900 		     mlx5_ib_stage_post_ib_reg_umr_init,
6901 		     NULL),
6902 };
6903 
6904 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
6905 {
6906 	struct mlx5_ib_multiport_info *mpi;
6907 	struct mlx5_ib_dev *dev;
6908 	bool bound = false;
6909 	int err;
6910 
6911 	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6912 	if (!mpi)
6913 		return NULL;
6914 
6915 	mpi->mdev = mdev;
6916 
6917 	err = mlx5_query_nic_vport_system_image_guid(mdev,
6918 						     &mpi->sys_image_guid);
6919 	if (err) {
6920 		kfree(mpi);
6921 		return NULL;
6922 	}
6923 
6924 	mutex_lock(&mlx5_ib_multiport_mutex);
6925 	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6926 		if (dev->sys_image_guid == mpi->sys_image_guid)
6927 			bound = mlx5_ib_bind_slave_port(dev, mpi);
6928 
6929 		if (bound) {
6930 			rdma_roce_rescan_device(&dev->ib_dev);
6931 			break;
6932 		}
6933 	}
6934 
6935 	if (!bound) {
6936 		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6937 		dev_dbg(mdev->device,
6938 			"no suitable IB device found to bind to, added to unaffiliated list.\n");
6939 	}
6940 	mutex_unlock(&mlx5_ib_multiport_mutex);
6941 
6942 	return mpi;
6943 }
6944 
6945 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6946 {
6947 	enum rdma_link_layer ll;
6948 	struct mlx5_ib_dev *dev;
6949 	int port_type_cap;
6950 	int num_ports;
6951 
6952 	printk_once(KERN_INFO "%s", mlx5_version);
6953 
6954 	if (MLX5_ESWITCH_MANAGER(mdev) &&
6955 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
6956 		if (!mlx5_core_mp_enabled(mdev))
6957 			mlx5_ib_register_vport_reps(mdev);
6958 		return mdev;
6959 	}
6960 
6961 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6962 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6963 
6964 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
6965 		return mlx5_ib_add_slave_port(mdev);
6966 
6967 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6968 			MLX5_CAP_GEN(mdev, num_vhca_ports));
6969 	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
6970 	if (!dev)
6971 		return NULL;
6972 	dev->port = kcalloc(num_ports, sizeof(*dev->port),
6973 			     GFP_KERNEL);
6974 	if (!dev->port) {
6975 		ib_dealloc_device(&dev->ib_dev);
6976 		return NULL;
6977 	}
6978 
6979 	dev->mdev = mdev;
6980 	dev->num_ports = num_ports;
6981 
6982 	return __mlx5_ib_add(dev, &pf_profile);
6983 }
6984 
6985 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
6986 {
6987 	struct mlx5_ib_multiport_info *mpi;
6988 	struct mlx5_ib_dev *dev;
6989 
6990 	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
6991 		mlx5_ib_unregister_vport_reps(mdev);
6992 		return;
6993 	}
6994 
6995 	if (mlx5_core_is_mp_slave(mdev)) {
6996 		mpi = context;
6997 		mutex_lock(&mlx5_ib_multiport_mutex);
6998 		if (mpi->ibdev)
6999 			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
7000 		list_del(&mpi->list);
7001 		mutex_unlock(&mlx5_ib_multiport_mutex);
7002 		return;
7003 	}
7004 
7005 	dev = context;
7006 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
7007 }
7008 
7009 static struct mlx5_interface mlx5_ib_interface = {
7010 	.add            = mlx5_ib_add,
7011 	.remove         = mlx5_ib_remove,
7012 	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
7013 };
7014 
7015 unsigned long mlx5_ib_get_xlt_emergency_page(void)
7016 {
7017 	mutex_lock(&xlt_emergency_page_mutex);
7018 	return xlt_emergency_page;
7019 }
7020 
7021 void mlx5_ib_put_xlt_emergency_page(void)
7022 {
7023 	mutex_unlock(&xlt_emergency_page_mutex);
7024 }
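
/*
 * Editor's note: the XLT emergency page is a single pre-allocated page
 * used as fallback scratch space under memory pressure; the mutex
 * doubles as the ownership token, so the calls must be strictly paired:
 *
 *	unsigned long page = mlx5_ib_get_xlt_emergency_page();
 *	...use the page...
 *	mlx5_ib_put_xlt_emergency_page();
 */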
7025 
7026 static int __init mlx5_ib_init(void)
7027 {
7028 	int err;
7029 
7030 	xlt_emergency_page = __get_free_page(GFP_KERNEL);
7031 	if (!xlt_emergency_page)
7032 		return -ENOMEM;
7033 
7034 	mutex_init(&xlt_emergency_page_mutex);
7035 
7036 	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
7037 	if (!mlx5_ib_event_wq) {
7038 		free_page(xlt_emergency_page);
7039 		return -ENOMEM;
7040 	}
7041 
7042 	mlx5_ib_odp_init();
7043 
7044 	err = mlx5_register_interface(&mlx5_ib_interface);
7045 
7046 	return err;
7047 }
7048 
7049 static void __exit mlx5_ib_cleanup(void)
7050 {
7051 	mlx5_unregister_interface(&mlx5_ib_interface);
7052 	destroy_workqueue(mlx5_ib_event_wq);
7053 	mutex_destroy(&xlt_emergency_page_mutex);
7054 	free_page(xlt_emergency_page);
7055 }
7056 
7057 module_init(mlx5_ib_init);
7058 module_exit(mlx5_ib_cleanup);
7059