xref: /linux/drivers/infiniband/hw/mlx5/main.c (revision 140eb5227767c6754742020a16d2691222b9c19b)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/debugfs.h>
34 #include <linux/highmem.h>
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/errno.h>
38 #include <linux/pci.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #if defined(CONFIG_X86)
42 #include <asm/pat.h>
43 #endif
44 #include <linux/sched.h>
45 #include <linux/sched/mm.h>
46 #include <linux/sched/task.h>
47 #include <linux/delay.h>
48 #include <rdma/ib_user_verbs.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_cache.h>
51 #include <linux/mlx5/port.h>
52 #include <linux/mlx5/vport.h>
53 #include <linux/list.h>
54 #include <rdma/ib_smi.h>
55 #include <rdma/ib_umem.h>
56 #include <linux/in.h>
57 #include <linux/etherdevice.h>
58 #include <linux/mlx5/fs.h>
60 #include "mlx5_ib.h"
61 #include "cmd.h"
63 
64 #define DRIVER_NAME "mlx5_ib"
65 #define DRIVER_VERSION "5.0-0"
66 
67 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
68 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
69 MODULE_LICENSE("Dual BSD/GPL");
70 
71 static char mlx5_version[] =
72 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
73 	DRIVER_VERSION "\n";
74 
75 enum {
76 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
77 };
78 
79 static enum rdma_link_layer
80 mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
81 {
82 	switch (port_type_cap) {
83 	case MLX5_CAP_PORT_TYPE_IB:
84 		return IB_LINK_LAYER_INFINIBAND;
85 	case MLX5_CAP_PORT_TYPE_ETH:
86 		return IB_LINK_LAYER_ETHERNET;
87 	default:
88 		return IB_LINK_LAYER_UNSPECIFIED;
89 	}
90 }
91 
92 static enum rdma_link_layer
93 mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
94 {
95 	struct mlx5_ib_dev *dev = to_mdev(device);
96 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
97 
98 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
99 }
100 
101 static int get_port_state(struct ib_device *ibdev,
102 			  u8 port_num,
103 			  enum ib_port_state *state)
104 {
105 	struct ib_port_attr attr;
106 	int ret;
107 
108 	memset(&attr, 0, sizeof(attr));
109 	ret = mlx5_ib_query_port(ibdev, port_num, &attr);
110 	if (!ret)
111 		*state = attr.state;
112 	return ret;
113 }
114 
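/*
 * RoCE netdev notifier: remember which netdev belongs to this HCA on
 * (un)register, and translate carrier/state changes of that netdev (or
 * of its bond master when LAG is active) into IB_EVENT_PORT_ACTIVE /
 * IB_EVENT_PORT_ERR events on port 1.
 */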
115 static int mlx5_netdev_event(struct notifier_block *this,
116 			     unsigned long event, void *ptr)
117 {
118 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
119 	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
120 						 roce.nb);
121 
122 	switch (event) {
123 	case NETDEV_REGISTER:
124 	case NETDEV_UNREGISTER:
125 		write_lock(&ibdev->roce.netdev_lock);
126 		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
127 			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
128 					     NULL : ndev;
129 		write_unlock(&ibdev->roce.netdev_lock);
130 		break;
131 
132 	case NETDEV_CHANGE:
133 	case NETDEV_UP:
134 	case NETDEV_DOWN: {
135 		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
136 		struct net_device *upper = NULL;
137 
138 		if (lag_ndev) {
139 			upper = netdev_master_upper_dev_get(lag_ndev);
140 			dev_put(lag_ndev);
141 		}
142 
143 		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
144 		    && ibdev->ib_active) {
145 			struct ib_event ibev = { };
146 			enum ib_port_state port_state;
147 
148 			if (get_port_state(&ibdev->ib_dev, 1, &port_state))
149 				return NOTIFY_DONE;
150 
151 			if (ibdev->roce.last_port_state == port_state)
152 				return NOTIFY_DONE;
153 
154 			ibdev->roce.last_port_state = port_state;
155 			ibev.device = &ibdev->ib_dev;
156 			if (port_state == IB_PORT_DOWN)
157 				ibev.event = IB_EVENT_PORT_ERR;
158 			else if (port_state == IB_PORT_ACTIVE)
159 				ibev.event = IB_EVENT_PORT_ACTIVE;
160 			else
161 				return NOTIFY_DONE;
162 
163 			ibev.element.port_num = 1;
164 			ib_dispatch_event(&ibev);
165 		}
166 		break;
167 	}
168 
169 	default:
170 		break;
171 	}
172 
173 	return NOTIFY_DONE;
174 }
175 
176 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
177 					     u8 port_num)
178 {
179 	struct mlx5_ib_dev *ibdev = to_mdev(device);
180 	struct net_device *ndev;
181 
182 	ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
183 	if (ndev)
184 		return ndev;
185 
186 	/* Ensure ndev does not disappear before we invoke dev_hold()
187 	 */
188 	read_lock(&ibdev->roce.netdev_lock);
189 	ndev = ibdev->roce.netdev;
190 	if (ndev)
191 		dev_hold(ndev);
192 	read_unlock(&ibdev->roce.netdev_lock);
193 
194 	return ndev;
195 }
196 
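/*
 * Translate the eth_proto_oper bitmask into the IB (width, speed) pair
 * whose product matches the Ethernet link rate, e.g. 100GbE is reported
 * as 4X EDR.  Returns -EINVAL for unrecognized protocols.
 */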
197 static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
198 				    u8 *active_width)
199 {
200 	switch (eth_proto_oper) {
201 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
202 	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
203 	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
204 	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
205 		*active_width = IB_WIDTH_1X;
206 		*active_speed = IB_SPEED_SDR;
207 		break;
208 	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
209 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
210 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
211 	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
212 	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
213 	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
214 	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
215 		*active_width = IB_WIDTH_1X;
216 		*active_speed = IB_SPEED_QDR;
217 		break;
218 	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
219 	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
220 	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
221 		*active_width = IB_WIDTH_1X;
222 		*active_speed = IB_SPEED_EDR;
223 		break;
224 	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
225 	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
226 	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
227 	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
228 		*active_width = IB_WIDTH_4X;
229 		*active_speed = IB_SPEED_QDR;
230 		break;
231 	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
232 	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
233 	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
234 		*active_width = IB_WIDTH_1X;
235 		*active_speed = IB_SPEED_HDR;
236 		break;
237 	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
238 		*active_width = IB_WIDTH_4X;
239 		*active_speed = IB_SPEED_FDR;
240 		break;
241 	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
242 	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
243 	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
244 	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
245 		*active_width = IB_WIDTH_4X;
246 		*active_speed = IB_SPEED_EDR;
247 		break;
248 	default:
249 		return -EINVAL;
250 	}
251 
252 	return 0;
253 }
254 
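/*
 * Fill ib_port_attr for a RoCE (Ethernet) port: speed/width are derived
 * from eth_proto_oper, the qkey violation counter from the NIC vport,
 * and state/MTU from the associated netdev (or its bond master when LAG
 * is active).
 */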
255 static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
256 				struct ib_port_attr *props)
257 {
258 	struct mlx5_ib_dev *dev = to_mdev(device);
259 	struct mlx5_core_dev *mdev = dev->mdev;
260 	struct net_device *ndev, *upper;
261 	enum ib_mtu ndev_ib_mtu;
262 	u16 qkey_viol_cntr;
263 	u32 eth_prot_oper;
264 	int err;
265 
266 	/* Possible bad flows are checked before filling out props so that,
267 	 * in case of an error, props is left zeroed out.
268 	 */
269 	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
270 	if (err)
271 		return err;
272 
273 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
274 				 &props->active_width);
275 
276 	props->port_cap_flags  |= IB_PORT_CM_SUP;
277 	props->port_cap_flags  |= IB_PORT_IP_BASED_GIDS;
278 
279 	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
280 						roce_address_table_size);
281 	props->max_mtu          = IB_MTU_4096;
282 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
283 	props->pkey_tbl_len     = 1;
284 	props->state            = IB_PORT_DOWN;
285 	props->phys_state       = 3;
286 
287 	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
288 	props->qkey_viol_cntr = qkey_viol_cntr;
289 
290 	ndev = mlx5_ib_get_netdev(device, port_num);
291 	if (!ndev)
292 		return 0;
293 
294 	if (mlx5_lag_is_active(dev->mdev)) {
295 		rcu_read_lock();
296 		upper = netdev_master_upper_dev_get_rcu(ndev);
297 		if (upper) {
298 			dev_put(ndev);
299 			ndev = upper;
300 			dev_hold(ndev);
301 		}
302 		rcu_read_unlock();
303 	}
304 
305 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
306 		props->state      = IB_PORT_ACTIVE;
307 		props->phys_state = 5;
308 	}
309 
310 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
311 
312 	dev_put(ndev);
313 
314 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
315 	return 0;
316 }
317 
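/*
 * Program one RoCE GID table entry in HW: the RoCE version (v1/v2) and
 * L3 type are derived from the GID type, the MAC/VLAN from the attached
 * netdev.  Used by both add_gid (gid != NULL) and del_gid (gid == NULL).
 */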
318 static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
319 			 unsigned int index, const union ib_gid *gid,
320 			 const struct ib_gid_attr *attr)
321 {
322 	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
323 	u8 roce_version = 0;
324 	u8 roce_l3_type = 0;
325 	bool vlan = false;
326 	u8 mac[ETH_ALEN];
327 	u16 vlan_id = 0;
328 
329 	if (gid) {
330 		gid_type = attr->gid_type;
331 		ether_addr_copy(mac, attr->ndev->dev_addr);
332 
333 		if (is_vlan_dev(attr->ndev)) {
334 			vlan = true;
335 			vlan_id = vlan_dev_vlan_id(attr->ndev);
336 		}
337 	}
338 
339 	switch (gid_type) {
340 	case IB_GID_TYPE_IB:
341 		roce_version = MLX5_ROCE_VERSION_1;
342 		break;
343 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
344 		roce_version = MLX5_ROCE_VERSION_2;
345 		if (ipv6_addr_v4mapped((void *)gid))
346 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
347 		else
348 			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
349 		break;
350 
351 	default:
352 		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
353 	}
354 
355 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
356 				      roce_l3_type, gid->raw, mac, vlan,
357 				      vlan_id);
358 }
359 
360 static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
361 			   unsigned int index, const union ib_gid *gid,
362 			   const struct ib_gid_attr *attr,
363 			   __always_unused void **context)
364 {
365 	return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
366 }
367 
368 static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
369 			   unsigned int index, __always_unused void **context)
370 {
371 	return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
372 }
373 
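/*
 * Return the UDP source port to use for a RoCE v2 GID entry (the
 * device's minimal RoCE source UDP port), or 0 when the entry is not a
 * usable RoCE v2 GID.
 */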
374 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
375 			       int index)
376 {
377 	struct ib_gid_attr attr;
378 	union ib_gid gid;
379 
380 	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
381 		return 0;
382 
383 	if (!attr.ndev)
384 		return 0;
385 
386 	dev_put(attr.ndev);
387 
388 	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
389 		return 0;
390 
391 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
392 }
393 
394 int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
395 			   int index, enum ib_gid_type *gid_type)
396 {
397 	struct ib_gid_attr attr;
398 	union ib_gid gid;
399 	int ret;
400 
401 	ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
402 	if (ret)
403 		return ret;
404 
405 	if (!attr.ndev)
406 		return -ENODEV;
407 
408 	dev_put(attr.ndev);
409 
410 	*gid_type = attr.gid_type;
411 
412 	return 0;
413 }
414 
415 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
416 {
417 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
418 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
419 	return 0;
420 }
421 
422 enum {
423 	MLX5_VPORT_ACCESS_METHOD_MAD,
424 	MLX5_VPORT_ACCESS_METHOD_HCA,
425 	MLX5_VPORT_ACCESS_METHOD_NIC,
426 };
427 
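/*
 * Choose how port/device attributes are queried: via MADs on IB ports
 * without ib_virt, via NIC vport commands on Ethernet ports, and via HCA
 * vport commands otherwise.
 */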
428 static int mlx5_get_vport_access_method(struct ib_device *ibdev)
429 {
430 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
431 		return MLX5_VPORT_ACCESS_METHOD_MAD;
432 
433 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
434 	    IB_LINK_LAYER_ETHERNET)
435 		return MLX5_VPORT_ACCESS_METHOD_NIC;
436 
437 	return MLX5_VPORT_ACCESS_METHOD_HCA;
438 }
439 
440 static void get_atomic_caps(struct mlx5_ib_dev *dev,
441 			    struct ib_device_attr *props)
442 {
443 	u8 tmp;
444 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
445 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
446 	u8 atomic_req_8B_endianness_mode =
447 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
448 
449 	/* Check if HW supports 8 bytes standard atomic operations and is
450 	 * capable of responding in host endianness
451 	 */
452 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
453 	if (((atomic_operations & tmp) == tmp) &&
454 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
455 	    (atomic_req_8B_endianness_mode)) {
456 		props->atomic_cap = IB_ATOMIC_HCA;
457 	} else {
458 		props->atomic_cap = IB_ATOMIC_NONE;
459 	}
460 }
461 
462 static int mlx5_query_system_image_guid(struct ib_device *ibdev,
463 					__be64 *sys_image_guid)
464 {
465 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
466 	struct mlx5_core_dev *mdev = dev->mdev;
467 	u64 tmp;
468 	int err;
469 
470 	switch (mlx5_get_vport_access_method(ibdev)) {
471 	case MLX5_VPORT_ACCESS_METHOD_MAD:
472 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
473 							    sys_image_guid);
474 
475 	case MLX5_VPORT_ACCESS_METHOD_HCA:
476 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
477 		break;
478 
479 	case MLX5_VPORT_ACCESS_METHOD_NIC:
480 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
481 		break;
482 
483 	default:
484 		return -EINVAL;
485 	}
486 
487 	if (!err)
488 		*sys_image_guid = cpu_to_be64(tmp);
489 
490 	return err;
492 }
493 
494 static int mlx5_query_max_pkeys(struct ib_device *ibdev,
495 				u16 *max_pkeys)
496 {
497 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
498 	struct mlx5_core_dev *mdev = dev->mdev;
499 
500 	switch (mlx5_get_vport_access_method(ibdev)) {
501 	case MLX5_VPORT_ACCESS_METHOD_MAD:
502 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
503 
504 	case MLX5_VPORT_ACCESS_METHOD_HCA:
505 	case MLX5_VPORT_ACCESS_METHOD_NIC:
506 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
507 						pkey_table_size));
508 		return 0;
509 
510 	default:
511 		return -EINVAL;
512 	}
513 }
514 
515 static int mlx5_query_vendor_id(struct ib_device *ibdev,
516 				u32 *vendor_id)
517 {
518 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
519 
520 	switch (mlx5_get_vport_access_method(ibdev)) {
521 	case MLX5_VPORT_ACCESS_METHOD_MAD:
522 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
523 
524 	case MLX5_VPORT_ACCESS_METHOD_HCA:
525 	case MLX5_VPORT_ACCESS_METHOD_NIC:
526 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
527 
528 	default:
529 		return -EINVAL;
530 	}
531 }
532 
533 static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
534 				__be64 *node_guid)
535 {
536 	u64 tmp;
537 	int err;
538 
539 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
540 	case MLX5_VPORT_ACCESS_METHOD_MAD:
541 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
542 
543 	case MLX5_VPORT_ACCESS_METHOD_HCA:
544 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
545 		break;
546 
547 	case MLX5_VPORT_ACCESS_METHOD_NIC:
548 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
549 		break;
550 
551 	default:
552 		return -EINVAL;
553 	}
554 
555 	if (!err)
556 		*node_guid = cpu_to_be64(tmp);
557 
558 	return err;
559 }
560 
561 struct mlx5_reg_node_desc {
562 	u8	desc[IB_DEVICE_NODE_DESC_MAX];
563 };
564 
565 static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
566 {
567 	struct mlx5_reg_node_desc in;
568 
569 	if (mlx5_use_mad_ifc(dev))
570 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
571 
572 	memset(&in, 0, sizeof(in));
573 
574 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
575 				    sizeof(struct mlx5_reg_node_desc),
576 				    MLX5_REG_NODE_DESC, 0, 0);
577 }
578 
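/*
 * Implement the query_device verb: build ib_device_attr and the mlx5
 * specific response (TSO, RSS, CQE compression, striding RQ caps, etc.)
 * from the HCA capability pages.  Extended fields are returned only when
 * the caller's udata buffer is large enough to hold them.
 */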
579 static int mlx5_ib_query_device(struct ib_device *ibdev,
580 				struct ib_device_attr *props,
581 				struct ib_udata *uhw)
582 {
583 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
584 	struct mlx5_core_dev *mdev = dev->mdev;
585 	int err = -ENOMEM;
586 	int max_sq_desc;
587 	int max_rq_sg;
588 	int max_sq_sg;
589 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
590 	struct mlx5_ib_query_device_resp resp = {};
591 	size_t resp_len;
592 	u64 max_tso;
593 
594 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
595 	if (uhw->outlen && uhw->outlen < resp_len)
596 		return -EINVAL;
597 	else
598 		resp.response_length = resp_len;
599 
600 	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
601 		return -EINVAL;
602 
603 	memset(props, 0, sizeof(*props));
604 	err = mlx5_query_system_image_guid(ibdev,
605 					   &props->sys_image_guid);
606 	if (err)
607 		return err;
608 
609 	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
610 	if (err)
611 		return err;
612 
613 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
614 	if (err)
615 		return err;
616 
617 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
618 		(fw_rev_min(dev->mdev) << 16) |
619 		fw_rev_sub(dev->mdev);
620 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
621 		IB_DEVICE_PORT_ACTIVE_EVENT		|
622 		IB_DEVICE_SYS_IMAGE_GUID		|
623 		IB_DEVICE_RC_RNR_NAK_GEN;
624 
625 	if (MLX5_CAP_GEN(mdev, pkv))
626 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
627 	if (MLX5_CAP_GEN(mdev, qkv))
628 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
629 	if (MLX5_CAP_GEN(mdev, apm))
630 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
631 	if (MLX5_CAP_GEN(mdev, xrc))
632 		props->device_cap_flags |= IB_DEVICE_XRC;
633 	if (MLX5_CAP_GEN(mdev, imaicl)) {
634 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
635 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
636 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
637 		/* We support 'Gappy' memory registration too */
638 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
639 	}
640 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
641 	if (MLX5_CAP_GEN(mdev, sho)) {
642 		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
643 		/* At this stage no support for signature handover */
644 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
645 				      IB_PROT_T10DIF_TYPE_2 |
646 				      IB_PROT_T10DIF_TYPE_3;
647 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
648 				       IB_GUARD_T10DIF_CSUM;
649 	}
650 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
651 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
652 
653 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
654 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
655 			/* Legacy bit to support old userspace libraries */
656 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
657 			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
658 		}
659 
660 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
661 			props->raw_packet_caps |=
662 				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
663 
664 		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
665 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
666 			if (max_tso) {
667 				resp.tso_caps.max_tso = 1 << max_tso;
668 				resp.tso_caps.supported_qpts |=
669 					1 << IB_QPT_RAW_PACKET;
670 				resp.response_length += sizeof(resp.tso_caps);
671 			}
672 		}
673 
674 		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
675 			resp.rss_caps.rx_hash_function =
676 						MLX5_RX_HASH_FUNC_TOEPLITZ;
677 			resp.rss_caps.rx_hash_fields_mask =
678 						MLX5_RX_HASH_SRC_IPV4 |
679 						MLX5_RX_HASH_DST_IPV4 |
680 						MLX5_RX_HASH_SRC_IPV6 |
681 						MLX5_RX_HASH_DST_IPV6 |
682 						MLX5_RX_HASH_SRC_PORT_TCP |
683 						MLX5_RX_HASH_DST_PORT_TCP |
684 						MLX5_RX_HASH_SRC_PORT_UDP |
685 						MLX5_RX_HASH_DST_PORT_UDP;
686 			resp.response_length += sizeof(resp.rss_caps);
687 		}
688 	} else {
689 		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
690 			resp.response_length += sizeof(resp.tso_caps);
691 		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
692 			resp.response_length += sizeof(resp.rss_caps);
693 	}
694 
695 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
696 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
697 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
698 	}
699 
700 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
701 	    MLX5_CAP_GEN(dev->mdev, general_notification_event))
702 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
703 
704 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
705 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
706 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
707 
708 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
709 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
710 		/* Legacy bit to support old userspace libraries */
711 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
712 		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
713 	}
714 
715 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
716 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
717 
718 	if (MLX5_CAP_GEN(mdev, end_pad))
719 		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
720 
721 	props->vendor_part_id	   = mdev->pdev->device;
722 	props->hw_ver		   = mdev->pdev->revision;
723 
724 	props->max_mr_size	   = ~0ull;
725 	props->page_size_cap	   = ~(min_page_size - 1);
726 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
727 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
728 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
729 		     sizeof(struct mlx5_wqe_data_seg);
730 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
731 	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
732 		     sizeof(struct mlx5_wqe_raddr_seg)) /
733 		sizeof(struct mlx5_wqe_data_seg);
734 	props->max_sge = min(max_rq_sg, max_sq_sg);
735 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
736 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
737 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
738 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
739 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
740 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
741 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
742 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
743 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
744 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
745 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
746 	props->max_srq_sge	   = max_rq_sg - 1;
747 	props->max_fast_reg_page_list_len =
748 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
749 	get_atomic_caps(dev, props);
750 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
751 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
752 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
753 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
754 					   props->max_mcast_grp;
755 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
756 	props->max_ah = INT_MAX;
757 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
758 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
759 
760 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
761 	if (MLX5_CAP_GEN(mdev, pg))
762 		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
763 	props->odp_caps = dev->odp_caps;
764 #endif
765 
766 	if (MLX5_CAP_GEN(mdev, cd))
767 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
768 
769 	if (!mlx5_core_is_pf(mdev))
770 		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
771 
772 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
773 	    IB_LINK_LAYER_ETHERNET) {
774 		props->rss_caps.max_rwq_indirection_tables =
775 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
776 		props->rss_caps.max_rwq_indirection_table_size =
777 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
778 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
779 		props->max_wq_type_rq =
780 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
781 	}
782 
783 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
784 		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
785 		props->tm_caps.max_num_tags =
786 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
787 		props->tm_caps.flags = IB_TM_CAP_RC;
788 		props->tm_caps.max_ops =
789 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
790 		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
791 	}
792 
793 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
794 		props->cq_caps.max_cq_moderation_count =
795 						MLX5_MAX_CQ_COUNT;
796 		props->cq_caps.max_cq_moderation_period =
797 						MLX5_MAX_CQ_PERIOD;
798 	}
799 
800 	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
801 		resp.cqe_comp_caps.max_num =
802 			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
803 			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
804 		resp.cqe_comp_caps.supported_format =
805 			MLX5_IB_CQE_RES_FORMAT_HASH |
806 			MLX5_IB_CQE_RES_FORMAT_CSUM;
807 		resp.response_length += sizeof(resp.cqe_comp_caps);
808 	}
809 
810 	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
811 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
812 		    MLX5_CAP_GEN(mdev, qos)) {
813 			resp.packet_pacing_caps.qp_rate_limit_max =
814 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
815 			resp.packet_pacing_caps.qp_rate_limit_min =
816 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
817 			resp.packet_pacing_caps.supported_qpts |=
818 				1 << IB_QPT_RAW_PACKET;
819 		}
820 		resp.response_length += sizeof(resp.packet_pacing_caps);
821 	}
822 
823 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
824 			uhw->outlen)) {
825 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
826 			resp.mlx5_ib_support_multi_pkt_send_wqes =
827 				MLX5_IB_ALLOW_MPW;
828 
829 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
830 			resp.mlx5_ib_support_multi_pkt_send_wqes |=
831 				MLX5_IB_SUPPORT_EMPW;
832 
833 		resp.response_length +=
834 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
835 	}
836 
837 	if (field_avail(typeof(resp), flags, uhw->outlen)) {
838 		resp.response_length += sizeof(resp.flags);
839 
840 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
841 			resp.flags |=
842 				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
843 
844 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
845 			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
846 	}
847 
848 	if (field_avail(typeof(resp), sw_parsing_caps,
849 			uhw->outlen)) {
850 		resp.response_length += sizeof(resp.sw_parsing_caps);
851 		if (MLX5_CAP_ETH(mdev, swp)) {
852 			resp.sw_parsing_caps.sw_parsing_offloads |=
853 				MLX5_IB_SW_PARSING;
854 
855 			if (MLX5_CAP_ETH(mdev, swp_csum))
856 				resp.sw_parsing_caps.sw_parsing_offloads |=
857 					MLX5_IB_SW_PARSING_CSUM;
858 
859 			if (MLX5_CAP_ETH(mdev, swp_lso))
860 				resp.sw_parsing_caps.sw_parsing_offloads |=
861 					MLX5_IB_SW_PARSING_LSO;
862 
863 			if (resp.sw_parsing_caps.sw_parsing_offloads)
864 				resp.sw_parsing_caps.supported_qpts =
865 					BIT(IB_QPT_RAW_PACKET);
866 		}
867 	}
868 
869 	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
870 		resp.response_length += sizeof(resp.striding_rq_caps);
871 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
872 			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
873 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
874 			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
875 				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
876 			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
877 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
878 			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
879 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
880 			resp.striding_rq_caps.supported_qpts =
881 				BIT(IB_QPT_RAW_PACKET);
882 		}
883 	}
884 
885 	if (field_avail(typeof(resp), tunnel_offloads_caps,
886 			uhw->outlen)) {
887 		resp.response_length += sizeof(resp.tunnel_offloads_caps);
888 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
889 			resp.tunnel_offloads_caps |=
890 				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
891 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
892 			resp.tunnel_offloads_caps |=
893 				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
894 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
895 			resp.tunnel_offloads_caps |=
896 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
897 	}
898 
899 	if (uhw->outlen) {
900 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
901 
902 		if (err)
903 			return err;
904 	}
905 
906 	return 0;
907 }
908 
909 enum mlx5_ib_width {
910 	MLX5_IB_WIDTH_1X	= 1 << 0,
911 	MLX5_IB_WIDTH_2X	= 1 << 1,
912 	MLX5_IB_WIDTH_4X	= 1 << 2,
913 	MLX5_IB_WIDTH_8X	= 1 << 3,
914 	MLX5_IB_WIDTH_12X	= 1 << 4
915 };
916 
917 static int translate_active_width(struct ib_device *ibdev, u8 active_width,
918 				  u8 *ib_width)
919 {
920 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
921 	int err = 0;
922 
923 	if (active_width & MLX5_IB_WIDTH_1X) {
924 		*ib_width = IB_WIDTH_1X;
925 	} else if (active_width & MLX5_IB_WIDTH_2X) {
926 		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
927 			    (int)active_width);
928 		err = -EINVAL;
929 	} else if (active_width & MLX5_IB_WIDTH_4X) {
930 		*ib_width = IB_WIDTH_4X;
931 	} else if (active_width & MLX5_IB_WIDTH_8X) {
932 		*ib_width = IB_WIDTH_8X;
933 	} else if (active_width & MLX5_IB_WIDTH_12X) {
934 		*ib_width = IB_WIDTH_12X;
935 	} else {
936 		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
937 			    (int)active_width);
938 		err = -EINVAL;
939 	}
940 
941 	return err;
942 }
943 
944 static int mlx5_mtu_to_ib_mtu(int mtu)
945 {
946 	switch (mtu) {
947 	case 256: return 1;
948 	case 512: return 2;
949 	case 1024: return 3;
950 	case 2048: return 4;
951 	case 4096: return 5;
952 	default:
953 		pr_warn("invalid mtu\n");
954 		return -1;
955 	}
956 }
957 
958 enum ib_max_vl_num {
959 	__IB_MAX_VL_0		= 1,
960 	__IB_MAX_VL_0_1		= 2,
961 	__IB_MAX_VL_0_3		= 3,
962 	__IB_MAX_VL_0_7		= 4,
963 	__IB_MAX_VL_0_14	= 5,
964 };
965 
966 enum mlx5_vl_hw_cap {
967 	MLX5_VL_HW_0	= 1,
968 	MLX5_VL_HW_0_1	= 2,
969 	MLX5_VL_HW_0_2	= 3,
970 	MLX5_VL_HW_0_3	= 4,
971 	MLX5_VL_HW_0_4	= 5,
972 	MLX5_VL_HW_0_5	= 6,
973 	MLX5_VL_HW_0_6	= 7,
974 	MLX5_VL_HW_0_7	= 8,
975 	MLX5_VL_HW_0_14	= 15
976 };
977 
978 static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
979 				u8 *max_vl_num)
980 {
981 	switch (vl_hw_cap) {
982 	case MLX5_VL_HW_0:
983 		*max_vl_num = __IB_MAX_VL_0;
984 		break;
985 	case MLX5_VL_HW_0_1:
986 		*max_vl_num = __IB_MAX_VL_0_1;
987 		break;
988 	case MLX5_VL_HW_0_3:
989 		*max_vl_num = __IB_MAX_VL_0_3;
990 		break;
991 	case MLX5_VL_HW_0_7:
992 		*max_vl_num = __IB_MAX_VL_0_7;
993 		break;
994 	case MLX5_VL_HW_0_14:
995 		*max_vl_num = __IB_MAX_VL_0_14;
996 		break;
997 
998 	default:
999 		return -EINVAL;
1000 	}
1001 
1002 	return 0;
1003 }
1004 
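/*
 * Query an IB port through the HCA vport context: LIDs, capability mask
 * and violation counters come from the vport context, while link width,
 * speed, MTU and VL capabilities are read through the mlx5 port query
 * helpers.
 */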
1005 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1006 			       struct ib_port_attr *props)
1007 {
1008 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1009 	struct mlx5_core_dev *mdev = dev->mdev;
1010 	struct mlx5_hca_vport_context *rep;
1011 	u16 max_mtu;
1012 	u16 oper_mtu;
1013 	int err;
1014 	u8 ib_link_width_oper;
1015 	u8 vl_hw_cap;
1016 
1017 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1018 	if (!rep) {
1019 		err = -ENOMEM;
1020 		goto out;
1021 	}
1022 
1023 	/* props being zeroed by the caller, avoid zeroing it here */
1024 
1025 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1026 	if (err)
1027 		goto out;
1028 
1029 	props->lid		= rep->lid;
1030 	props->lmc		= rep->lmc;
1031 	props->sm_lid		= rep->sm_lid;
1032 	props->sm_sl		= rep->sm_sl;
1033 	props->state		= rep->vport_state;
1034 	props->phys_state	= rep->port_physical_state;
1035 	props->port_cap_flags	= rep->cap_mask1;
1036 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1037 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1038 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1039 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
1040 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
1041 	props->subnet_timeout	= rep->subnet_timeout;
1042 	props->init_type_reply	= rep->init_type_reply;
1043 	props->grh_required	= rep->grh_required;
1044 
1045 	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
1046 	if (err)
1047 		goto out;
1048 
1049 	err = translate_active_width(ibdev, ib_link_width_oper,
1050 				     &props->active_width);
1051 	if (err)
1052 		goto out;
1053 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1054 	if (err)
1055 		goto out;
1056 
1057 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1058 
1059 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1060 
1061 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1062 
1063 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1064 
1065 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1066 	if (err)
1067 		goto out;
1068 
1069 	err = translate_max_vl_num(ibdev, vl_hw_cap,
1070 				   &props->max_vl_num);
1071 out:
1072 	kfree(rep);
1073 	return err;
1074 }
1075 
1076 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1077 		       struct ib_port_attr *props)
1078 {
1079 	unsigned int count;
1080 	int ret;
1081 
1082 	switch (mlx5_get_vport_access_method(ibdev)) {
1083 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1084 		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1085 		break;
1086 
1087 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1088 		ret = mlx5_query_hca_port(ibdev, port, props);
1089 		break;
1090 
1091 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1092 		ret = mlx5_query_port_roce(ibdev, port, props);
1093 		break;
1094 
1095 	default:
1096 		ret = -EINVAL;
1097 	}
1098 
1099 	if (!ret && props) {
1100 		count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
1101 		props->gid_tbl_len -= count;
1102 	}
1103 	return ret;
1104 }
1105 
1106 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1107 			     union ib_gid *gid)
1108 {
1109 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1110 	struct mlx5_core_dev *mdev = dev->mdev;
1111 
1112 	switch (mlx5_get_vport_access_method(ibdev)) {
1113 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1114 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1115 
1116 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1117 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1118 
1119 	default:
1120 		return -EINVAL;
1121 	}
1122 
1123 }
1124 
1125 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1126 			      u16 *pkey)
1127 {
1128 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1129 	struct mlx5_core_dev *mdev = dev->mdev;
1130 
1131 	switch (mlx5_get_vport_access_method(ibdev)) {
1132 	case MLX5_VPORT_ACCESS_METHOD_MAD:
1133 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1134 
1135 	case MLX5_VPORT_ACCESS_METHOD_HCA:
1136 	case MLX5_VPORT_ACCESS_METHOD_NIC:
1137 		return mlx5_query_hca_vport_pkey(mdev, 0, port,  0, index,
1138 						 pkey);
1139 	default:
1140 		return -EINVAL;
1141 	}
1142 }
1143 
1144 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1145 				 struct ib_device_modify *props)
1146 {
1147 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1148 	struct mlx5_reg_node_desc in;
1149 	struct mlx5_reg_node_desc out;
1150 	int err;
1151 
1152 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1153 		return -EOPNOTSUPP;
1154 
1155 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1156 		return 0;
1157 
1158 	/*
1159 	 * If possible, pass node desc to FW, so it can generate
1160 	 * a 144 trap.  If cmd fails, just ignore.
1161 	 */
1162 	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1163 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1164 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1165 	if (err)
1166 		return err;
1167 
1168 	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1169 
1170 	return err;
1171 }
1172 
1173 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1174 				u32 value)
1175 {
1176 	struct mlx5_hca_vport_context ctx = {};
1177 	int err;
1178 
1179 	err = mlx5_query_hca_vport_context(dev->mdev, 0,
1180 					   port_num, 0, &ctx);
1181 	if (err)
1182 		return err;
1183 
1184 	if (~ctx.cap_mask1_perm & mask) {
1185 		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1186 			     mask, ctx.cap_mask1_perm);
1187 		return -EINVAL;
1188 	}
1189 
1190 	ctx.cap_mask1 = value;
1191 	ctx.cap_mask1_perm = mask;
1192 	err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
1193 						 port_num, 0, &ctx);
1194 
1195 	return err;
1196 }
1197 
1198 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1199 			       struct ib_port_modify *props)
1200 {
1201 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1202 	struct ib_port_attr attr;
1203 	u32 tmp;
1204 	int err;
1205 	u32 change_mask;
1206 	u32 value;
1207 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1208 		      IB_LINK_LAYER_INFINIBAND);
1209 
1210 	/* CM layer calls ib_modify_port() regardless of the link layer. For
1211 	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1212 	 */
1213 	if (!is_ib)
1214 		return 0;
1215 
1216 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1217 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1218 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1219 		return set_port_caps_atomic(dev, port, change_mask, value);
1220 	}
1221 
1222 	mutex_lock(&dev->cap_mask_mutex);
1223 
1224 	err = ib_query_port(ibdev, port, &attr);
1225 	if (err)
1226 		goto out;
1227 
1228 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1229 		~props->clr_port_cap_mask;
1230 
1231 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
1232 
1233 out:
1234 	mutex_unlock(&dev->cap_mask_mutex);
1235 	return err;
1236 }
1237 
1238 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1239 {
1240 	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1241 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1242 }
1243 
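/*
 * Round the requested number of blue-flame registers up to a whole
 * number of UAR system pages (honouring 4K-UAR mode) and report how many
 * system pages must be allocated for this context.
 */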
1244 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1245 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
1246 			     u32 *num_sys_pages)
1247 {
1248 	int uars_per_sys_page;
1249 	int bfregs_per_sys_page;
1250 	int ref_bfregs = req->total_num_bfregs;
1251 
1252 	if (req->total_num_bfregs == 0)
1253 		return -EINVAL;
1254 
1255 	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1256 	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1257 
1258 	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1259 		return -ENOMEM;
1260 
1261 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1262 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1263 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1264 	*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1265 
1266 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1267 		return -EINVAL;
1268 
1269 	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
1270 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1271 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
1272 		    req->total_num_bfregs, *num_sys_pages);
1273 
1274 	return 0;
1275 }
1276 
1277 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1278 {
1279 	struct mlx5_bfreg_info *bfregi;
1280 	int err;
1281 	int i;
1282 
1283 	bfregi = &context->bfregi;
1284 	for (i = 0; i < bfregi->num_sys_pages; i++) {
1285 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1286 		if (err)
1287 			goto error;
1288 
1289 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1290 	}
1291 	return 0;
1292 
1293 error:
1294 	for (--i; i >= 0; i--)
1295 		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1296 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1297 
1298 	return err;
1299 }
1300 
1301 static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1302 {
1303 	struct mlx5_bfreg_info *bfregi;
1304 	int err;
1305 	int i;
1306 
1307 	bfregi = &context->bfregi;
1308 	for (i = 0; i < bfregi->num_sys_pages; i++) {
1309 		err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1310 		if (err) {
1311 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1312 			return err;
1313 		}
1314 	}
1315 	return 0;
1316 }
1317 
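/*
 * Allocate a transport domain for a user context.  On Ethernet ports
 * where the device can disable local loopback, loopback is enabled once
 * a second user transport domain exists and disabled again when the
 * count drops below two (see the dealloc counterpart below).
 */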
1318 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1319 {
1320 	int err;
1321 
1322 	err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
1323 	if (err)
1324 		return err;
1325 
1326 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1327 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1328 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1329 		return err;
1330 
1331 	mutex_lock(&dev->lb_mutex);
1332 	dev->user_td++;
1333 
1334 	if (dev->user_td == 2)
1335 		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1336 
1337 	mutex_unlock(&dev->lb_mutex);
1338 	return err;
1339 }
1340 
1341 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1342 {
1343 	mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1344 
1345 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1346 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1347 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1348 		return;
1349 
1350 	mutex_lock(&dev->lb_mutex);
1351 	dev->user_td--;
1352 
1353 	if (dev->user_td < 2)
1354 		mlx5_nic_vport_update_local_lb(dev->mdev, false);
1355 
1356 	mutex_unlock(&dev->lb_mutex);
1357 }
1358 
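/*
 * Allocate a user context: negotiate the request layout (v0/v2), size
 * and allocate the blue-flame register pages, optionally allocate a
 * transport domain, and report the negotiated sizes and capabilities
 * back to user space through udata.
 */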
1359 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1360 						  struct ib_udata *udata)
1361 {
1362 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
1363 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1364 	struct mlx5_ib_alloc_ucontext_resp resp = {};
1365 	struct mlx5_ib_ucontext *context;
1366 	struct mlx5_bfreg_info *bfregi;
1367 	int ver;
1368 	int err;
1369 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1370 				     max_cqe_version);
1371 	bool lib_uar_4k;
1372 
1373 	if (!dev->ib_active)
1374 		return ERR_PTR(-EAGAIN);
1375 
1376 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1377 		ver = 0;
1378 	else if (udata->inlen >= min_req_v2)
1379 		ver = 2;
1380 	else
1381 		return ERR_PTR(-EINVAL);
1382 
1383 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1384 	if (err)
1385 		return ERR_PTR(err);
1386 
1387 	if (req.flags)
1388 		return ERR_PTR(-EINVAL);
1389 
1390 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1391 		return ERR_PTR(-EOPNOTSUPP);
1392 
1393 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1394 				    MLX5_NON_FP_BFREGS_PER_UAR);
1395 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1396 		return ERR_PTR(-EINVAL);
1397 
1398 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1399 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1400 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1401 	resp.cache_line_size = cache_line_size();
1402 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1403 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1404 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1405 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1406 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1407 	resp.cqe_version = min_t(__u8,
1408 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1409 				 req.max_cqe_version);
1410 	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1411 				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1412 	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1413 					MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1414 	resp.response_length = min(offsetof(typeof(resp), response_length) +
1415 				   sizeof(resp.response_length), udata->outlen);
1416 
1417 	context = kzalloc(sizeof(*context), GFP_KERNEL);
1418 	if (!context)
1419 		return ERR_PTR(-ENOMEM);
1420 
1421 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1422 	bfregi = &context->bfregi;
1423 
1424 	/* updates req->total_num_bfregs */
1425 	err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
1426 	if (err)
1427 		goto out_ctx;
1428 
1429 	mutex_init(&bfregi->lock);
1430 	bfregi->lib_uar_4k = lib_uar_4k;
1431 	bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
1432 				GFP_KERNEL);
1433 	if (!bfregi->count) {
1434 		err = -ENOMEM;
1435 		goto out_ctx;
1436 	}
1437 
1438 	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1439 				    sizeof(*bfregi->sys_pages),
1440 				    GFP_KERNEL);
1441 	if (!bfregi->sys_pages) {
1442 		err = -ENOMEM;
1443 		goto out_count;
1444 	}
1445 
1446 	err = allocate_uars(dev, context);
1447 	if (err)
1448 		goto out_sys_pages;
1449 
1450 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1451 	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1452 #endif
1453 
1454 	context->upd_xlt_page = __get_free_page(GFP_KERNEL);
1455 	if (!context->upd_xlt_page) {
1456 		err = -ENOMEM;
1457 		goto out_uars;
1458 	}
1459 	mutex_init(&context->upd_xlt_page_mutex);
1460 
1461 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1462 		err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
1463 		if (err)
1464 			goto out_page;
1465 	}
1466 
1467 	INIT_LIST_HEAD(&context->vma_private_list);
1468 	mutex_init(&context->vma_private_list_mutex);
1469 	INIT_LIST_HEAD(&context->db_page_list);
1470 	mutex_init(&context->db_page_mutex);
1471 
1472 	resp.tot_bfregs = req.total_num_bfregs;
1473 	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
1474 
1475 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1476 		resp.response_length += sizeof(resp.cqe_version);
1477 
1478 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1479 		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1480 				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1481 		resp.response_length += sizeof(resp.cmds_supp_uhw);
1482 	}
1483 
1484 	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1485 		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1486 			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1487 			resp.eth_min_inline++;
1488 		}
1489 		resp.response_length += sizeof(resp.eth_min_inline);
1490 	}
1491 
1492 	/*
1493 	 * We don't want to expose information from the PCI bar that is located
1494 	 * after 4096 bytes, so if the arch only supports larger pages, let's
1495 	 * pretend we don't support reading the HCA's core clock. This is also
1496 	 * forced by mmap function.
1497 	 */
1498 	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1499 		if (PAGE_SIZE <= 4096) {
1500 			resp.comp_mask |=
1501 				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1502 			resp.hca_core_clock_offset =
1503 				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1504 		}
1505 		resp.response_length += sizeof(resp.hca_core_clock_offset) +
1506 					sizeof(resp.reserved2);
1507 	}
1508 
1509 	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1510 		resp.response_length += sizeof(resp.log_uar_size);
1511 
1512 	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1513 		resp.response_length += sizeof(resp.num_uars_per_page);
1514 
1515 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1516 	if (err)
1517 		goto out_td;
1518 
1519 	bfregi->ver = ver;
1520 	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1521 	context->cqe_version = resp.cqe_version;
1522 	context->lib_caps = req.lib_caps;
1523 	print_lib_caps(dev, context->lib_caps);
1524 
1525 	return &context->ibucontext;
1526 
1527 out_td:
1528 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1529 		mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1530 
1531 out_page:
1532 	free_page(context->upd_xlt_page);
1533 
1534 out_uars:
1535 	deallocate_uars(dev, context);
1536 
1537 out_sys_pages:
1538 	kfree(bfregi->sys_pages);
1539 
1540 out_count:
1541 	kfree(bfregi->count);
1542 
1543 out_ctx:
1544 	kfree(context);
1545 
1546 	return ERR_PTR(err);
1547 }
1548 
1549 static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1550 {
1551 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1552 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1553 	struct mlx5_bfreg_info *bfregi;
1554 
1555 	bfregi = &context->bfregi;
1556 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1557 		mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1558 
1559 	free_page(context->upd_xlt_page);
1560 	deallocate_uars(dev, context);
1561 	kfree(bfregi->sys_pages);
1562 	kfree(bfregi->count);
1563 	kfree(context);
1564 
1565 	return 0;
1566 }
1567 
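/*
 * Translate the firmware UAR index stored for a given system page into
 * its page frame number within BAR 0, accounting for how many 4K
 * firmware UARs share one system page.
 */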
1568 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
1569 				 struct mlx5_bfreg_info *bfregi,
1570 				 int idx)
1571 {
1572 	int fw_uars_per_page;
1573 
1574 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
1575 
1576 	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
1577 			bfregi->sys_pages[idx] / fw_uars_per_page;
1578 }
1579 
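/*
 * The mmap offset encodes a command in the bits at and above
 * MLX5_IB_MMAP_CMD_SHIFT and a command specific argument (e.g. the UAR
 * page index) in the bits below it, i.e. offset = (cmd << shift) | arg.
 */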
1580 static int get_command(unsigned long offset)
1581 {
1582 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1583 }
1584 
1585 static int get_arg(unsigned long offset)
1586 {
1587 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1588 }
1589 
1590 static int get_index(unsigned long offset)
1591 {
1592 	return get_arg(offset);
1593 }
1594 
1595 static void  mlx5_ib_vma_open(struct vm_area_struct *area)
1596 {
1597 	/* vma_open is called when a new VMA is created on top of our VMA.  This
1598 	 * is done through either mremap flow or split_vma (usually due to
1599 	 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
1600 	 * as this VMA is strongly hardware related.  Therefore we set the
1601 	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1602 	 * calling us again and trying to do incorrect actions.  We assume that
1603 	 * the original VMA size is exactly a single page, and therefore no
1604 	 * "splitting" operation will happen to it.
1605 	 */
1606 	area->vm_ops = NULL;
1607 }
1608 
1609 static void  mlx5_ib_vma_close(struct vm_area_struct *area)
1610 {
1611 	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1612 
1613 	/* It's guaranteed that all VMAs opened on a FD are closed before the
1614 	 * file itself is closed, therefore no sync is needed with the regular
1615 	 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
1616 	 * However, a sync is needed with accesses to the vma as part of
1617 	 * mlx5_ib_disassociate_ucontext.
1618 	 * The close operation is usually called under mm->mmap_sem except when
1619 	 * process is exiting.
1620 	 * The exiting case is handled explicitly as part of
1621 	 * mlx5_ib_disassociate_ucontext.
1622 	 */
1623 	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1624 
1625 	/* setting the vma context pointer to null in the mlx5_ib driver's
1626 	 * private data, to protect a race condition in
1627 	 * private data, to protect against a race condition in
1628 	 */
1629 	mlx5_ib_vma_priv_data->vma = NULL;
1630 	mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1631 	list_del(&mlx5_ib_vma_priv_data->list);
1632 	mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1633 	kfree(mlx5_ib_vma_priv_data);
1634 }
1635 
1636 static const struct vm_operations_struct mlx5_ib_vm_ops = {
1637 	.open = mlx5_ib_vma_open,
1638 	.close = mlx5_ib_vma_close
1639 };
1640 
1641 static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1642 				struct mlx5_ib_ucontext *ctx)
1643 {
1644 	struct mlx5_ib_vma_private_data *vma_prv;
1645 	struct list_head *vma_head = &ctx->vma_private_list;
1646 
1647 	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1648 	if (!vma_prv)
1649 		return -ENOMEM;
1650 
1651 	vma_prv->vma = vma;
1652 	vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
1653 	vma->vm_private_data = vma_prv;
1654 	vma->vm_ops =  &mlx5_ib_vm_ops;
1655 
1656 	mutex_lock(&ctx->vma_private_list_mutex);
1657 	list_add(&vma_prv->list, vma_head);
1658 	mutex_unlock(&ctx->vma_private_list_mutex);
1659 
1660 	return 0;
1661 }
1662 
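/*
 * Called when the device is removed while user space still holds the
 * context: wait for the owning task if it is already exiting, otherwise
 * zap the PTEs of every VMA that was mmapped through this context so
 * the mappings can no longer be touched.
 */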
1663 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1664 {
1665 	int ret;
1666 	struct vm_area_struct *vma;
1667 	struct mlx5_ib_vma_private_data *vma_private, *n;
1668 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1669 	struct task_struct *owning_process  = NULL;
1670 	struct mm_struct   *owning_mm       = NULL;
1671 
1672 	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
1673 	if (!owning_process)
1674 		return;
1675 
1676 	owning_mm = get_task_mm(owning_process);
1677 	if (!owning_mm) {
1678 		pr_info("no mm, disassociate ucontext is pending task termination\n");
1679 		while (1) {
1680 			put_task_struct(owning_process);
1681 			usleep_range(1000, 2000);
1682 			owning_process = get_pid_task(ibcontext->tgid,
1683 						      PIDTYPE_PID);
1684 			if (!owning_process ||
1685 			    owning_process->state == TASK_DEAD) {
1686 				pr_info("disassociate ucontext done, task was terminated\n");
1687 				/* in case the task was dead we need to
1688 				 * release the task struct.
1689 				 */
1690 				if (owning_process)
1691 					put_task_struct(owning_process);
1692 				return;
1693 			}
1694 		}
1695 	}
1696 
1697 	/* need to protect from a race on closing the vma as part of
1698 	 * mlx5_ib_vma_close.
1699 	 */
1700 	down_write(&owning_mm->mmap_sem);
1701 	mutex_lock(&context->vma_private_list_mutex);
1702 	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
1703 				 list) {
1704 		vma = vma_private->vma;
1705 		ret = zap_vma_ptes(vma, vma->vm_start,
1706 				   PAGE_SIZE);
1707 		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
1708 		/* context going to be destroyed, should
1709 		 * not access ops any more.
1710 		 */
1711 		vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
1712 		vma->vm_ops = NULL;
1713 		list_del(&vma_private->list);
1714 		kfree(vma_private);
1715 	}
1716 	mutex_unlock(&context->vma_private_list_mutex);
1717 	up_write(&owning_mm->mmap_sem);
1718 	mmput(owning_mm);
1719 	put_task_struct(owning_process);
1720 }
1721 
1722 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1723 {
1724 	switch (cmd) {
1725 	case MLX5_IB_MMAP_WC_PAGE:
1726 		return "WC";
1727 	case MLX5_IB_MMAP_REGULAR_PAGE:
1728 		return "best effort WC";
1729 	case MLX5_IB_MMAP_NC_PAGE:
1730 		return "NC";
1731 	default:
1732 		return NULL;
1733 	}
1734 }
1735 
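/*
 * mmap one UAR system page into user space after validating the offset.
 * WC mappings are refused on architectures that cannot provide write
 * combining; MLX5_IB_MMAP_NC_PAGE pages are mapped non-cached.
 */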
1736 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
1737 		    struct vm_area_struct *vma,
1738 		    struct mlx5_ib_ucontext *context)
1739 {
1740 	struct mlx5_bfreg_info *bfregi = &context->bfregi;
1741 	int err;
1742 	unsigned long idx;
1743 	phys_addr_t pfn, pa;
1744 	pgprot_t prot;
1745 	int uars_per_page;
1746 
1747 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1748 		return -EINVAL;
1749 
1750 	uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
1751 	idx = get_index(vma->vm_pgoff);
1752 	if (idx % uars_per_page ||
1753 	    idx * uars_per_page >= bfregi->num_sys_pages) {
1754 		mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
1755 		return -EINVAL;
1756 	}
1757 
1758 	switch (cmd) {
1759 	case MLX5_IB_MMAP_WC_PAGE:
1760 /* Some architectures don't support WC memory */
1761 #if defined(CONFIG_X86)
1762 		if (!pat_enabled())
1763 			return -EPERM;
1764 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
1765 		return -EPERM;
1766 #endif
1767 	/* fall through */
1768 	case MLX5_IB_MMAP_REGULAR_PAGE:
1769 		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
1770 		prot = pgprot_writecombine(vma->vm_page_prot);
1771 		break;
1772 	case MLX5_IB_MMAP_NC_PAGE:
1773 		prot = pgprot_noncached(vma->vm_page_prot);
1774 		break;
1775 	default:
1776 		return -EINVAL;
1777 	}
1778 
1779 	pfn = uar_index2pfn(dev, bfregi, idx);
1780 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
1781 
1782 	vma->vm_page_prot = prot;
1783 	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
1784 				 PAGE_SIZE, vma->vm_page_prot);
1785 	if (err) {
1786 		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
1787 			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
1788 		return -EAGAIN;
1789 	}
1790 
1791 	pa = pfn << PAGE_SHIFT;
1792 	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
1793 		    vma->vm_start, &pa);
1794 
1795 	return mlx5_ib_set_vma_data(vma, context);
1796 }
1797 
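/*
 * Dispatch user-space mmap() requests according to the command encoded in
 * vm_pgoff (extracted via get_command()): UAR pages go through uar_mmap(),
 * MLX5_IB_MMAP_CORE_CLOCK exposes a read-only, non-cached mapping of the
 * internal timer, and unsupported commands are rejected.
 */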
1798 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
1799 {
1800 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1801 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1802 	unsigned long command;
1803 	phys_addr_t pfn;
1804 
1805 	command = get_command(vma->vm_pgoff);
1806 	switch (command) {
1807 	case MLX5_IB_MMAP_WC_PAGE:
1808 	case MLX5_IB_MMAP_NC_PAGE:
1809 	case MLX5_IB_MMAP_REGULAR_PAGE:
1810 		return uar_mmap(dev, command, vma, context);
1811 
1812 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
1813 		return -ENOSYS;
1814 
1815 	case MLX5_IB_MMAP_CORE_CLOCK:
1816 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1817 			return -EINVAL;
1818 
1819 		if (vma->vm_flags & VM_WRITE)
1820 			return -EPERM;
1821 
1822 		/* Don't expose to user-space information it shouldn't have */
1823 		if (PAGE_SIZE > 4096)
1824 			return -EOPNOTSUPP;
1825 
1826 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1827 		pfn = (dev->mdev->iseg_base +
1828 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
1829 			PAGE_SHIFT;
1830 		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
1831 				       PAGE_SIZE, vma->vm_page_prot))
1832 			return -EAGAIN;
1833 
1834 		mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
1835 			    vma->vm_start,
1836 			    (unsigned long long)pfn << PAGE_SHIFT);
1837 		break;
1838 
1839 	default:
1840 		return -EINVAL;
1841 	}
1842 
1843 	return 0;
1844 }
1845 
1846 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
1847 				      struct ib_ucontext *context,
1848 				      struct ib_udata *udata)
1849 {
1850 	struct mlx5_ib_alloc_pd_resp resp;
1851 	struct mlx5_ib_pd *pd;
1852 	int err;
1853 
1854 	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1855 	if (!pd)
1856 		return ERR_PTR(-ENOMEM);
1857 
1858 	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
1859 	if (err) {
1860 		kfree(pd);
1861 		return ERR_PTR(err);
1862 	}
1863 
1864 	if (context) {
1865 		resp.pdn = pd->pdn;
1866 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1867 			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
1868 			kfree(pd);
1869 			return ERR_PTR(-EFAULT);
1870 		}
1871 	}
1872 
1873 	return &pd->ibpd;
1874 }
1875 
1876 static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
1877 {
1878 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
1879 	struct mlx5_ib_pd *mpd = to_mpd(pd);
1880 
1881 	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
1882 	kfree(mpd);
1883 
1884 	return 0;
1885 }
1886 
1887 enum {
1888 	MATCH_CRITERIA_ENABLE_OUTER_BIT,
1889 	MATCH_CRITERIA_ENABLE_MISC_BIT,
1890 	MATCH_CRITERIA_ENABLE_INNER_BIT
1891 };
1892 
1893 #define HEADER_IS_ZERO(match_criteria, headers)			           \
1894 	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1895 		    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
1896 
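/*
 * Build the match_criteria_enable bitmask for a flow table entry: a bit is
 * set for the outer, misc and inner parameter groups only when the
 * corresponding header block in the match criteria is non-zero.
 */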
1897 static u8 get_match_criteria_enable(u32 *match_criteria)
1898 {
1899 	u8 match_criteria_enable;
1900 
1901 	match_criteria_enable =
1902 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1903 		MATCH_CRITERIA_ENABLE_OUTER_BIT;
1904 	match_criteria_enable |=
1905 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1906 		MATCH_CRITERIA_ENABLE_MISC_BIT;
1907 	match_criteria_enable |=
1908 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1909 		MATCH_CRITERIA_ENABLE_INNER_BIT;
1910 
1911 	return match_criteria_enable;
1912 }
1913 
1914 static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
1915 {
1916 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
1917 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
1918 }
1919 
1920 static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
1921 			   bool inner)
1922 {
1923 	if (inner) {
1924 		MLX5_SET(fte_match_set_misc,
1925 			 misc_c, inner_ipv6_flow_label, mask);
1926 		MLX5_SET(fte_match_set_misc,
1927 			 misc_v, inner_ipv6_flow_label, val);
1928 	} else {
1929 		MLX5_SET(fte_match_set_misc,
1930 			 misc_c, outer_ipv6_flow_label, mask);
1931 		MLX5_SET(fte_match_set_misc,
1932 			 misc_v, outer_ipv6_flow_label, val);
1933 	}
1934 }
1935 
1936 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
1937 {
1938 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
1939 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
1940 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
1941 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
1942 }
1943 
1944 #define LAST_ETH_FIELD vlan_tag
1945 #define LAST_IB_FIELD sl
1946 #define LAST_IPV4_FIELD tos
1947 #define LAST_IPV6_FIELD traffic_class
1948 #define LAST_TCP_UDP_FIELD src_port
1949 #define LAST_TUNNEL_FIELD tunnel_id
1950 #define LAST_FLOW_TAG_FIELD tag_id
1951 #define LAST_DROP_FIELD size
1952 
1953 /* "field" is the last supported member of "filter"; non-zero bytes after it mean unsupported fields were set */
1954 #define FIELDS_NOT_SUPPORTED(filter, field)\
1955 	memchr_inv((void *)&filter.field  +\
1956 		   sizeof(filter.field), 0,\
1957 		   sizeof(filter) -\
1958 		   offsetof(typeof(filter), field) -\
1959 		   sizeof(filter.field))
1960 
1961 #define IPV4_VERSION 4
1962 #define IPV6_VERSION 6
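/*
 * Translate one ib_flow_spec into the device's fte_match_param layout,
 * filling both the match criteria (mask) and the match value.  Inner specs
 * are written to the inner_headers block, the rest to outer_headers, while
 * IPv6 flow label and VXLAN VNI land in misc_parameters; flow-tag and drop
 * specs only update *tag_id / *is_drop.
 */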
1963 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
1964 			   u32 *match_v, const union ib_flow_spec *ib_spec,
1965 			   u32 *tag_id, bool *is_drop)
1966 {
1967 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
1968 					   misc_parameters);
1969 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
1970 					   misc_parameters);
1971 	void *headers_c;
1972 	void *headers_v;
1973 	int match_ipv;
1974 
1975 	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
1976 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
1977 					 inner_headers);
1978 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
1979 					 inner_headers);
1980 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
1981 					ft_field_support.inner_ip_version);
1982 	} else {
1983 		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
1984 					 outer_headers);
1985 		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
1986 					 outer_headers);
1987 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
1988 					ft_field_support.outer_ip_version);
1989 	}
1990 
1991 	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
1992 	case IB_FLOW_SPEC_ETH:
1993 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1994 			return -EOPNOTSUPP;
1995 
1996 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1997 					     dmac_47_16),
1998 				ib_spec->eth.mask.dst_mac);
1999 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2000 					     dmac_47_16),
2001 				ib_spec->eth.val.dst_mac);
2002 
2003 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2004 					     smac_47_16),
2005 				ib_spec->eth.mask.src_mac);
2006 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2007 					     smac_47_16),
2008 				ib_spec->eth.val.src_mac);
2009 
2010 		if (ib_spec->eth.mask.vlan_tag) {
2011 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2012 				 cvlan_tag, 1);
2013 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2014 				 cvlan_tag, 1);
2015 
2016 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2017 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2018 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2019 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2020 
2021 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2022 				 first_cfi,
2023 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2024 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2025 				 first_cfi,
2026 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2027 
2028 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2029 				 first_prio,
2030 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2031 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2032 				 first_prio,
2033 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2034 		}
2035 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2036 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
2037 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2038 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
2039 		break;
2040 	case IB_FLOW_SPEC_IPV4:
2041 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2042 			return -EOPNOTSUPP;
2043 
2044 		if (match_ipv) {
2045 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2046 				 ip_version, 0xf);
2047 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2048 				 ip_version, IPV4_VERSION);
2049 		} else {
2050 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2051 				 ethertype, 0xffff);
2052 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2053 				 ethertype, ETH_P_IP);
2054 		}
2055 
2056 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2057 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2058 		       &ib_spec->ipv4.mask.src_ip,
2059 		       sizeof(ib_spec->ipv4.mask.src_ip));
2060 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2061 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
2062 		       &ib_spec->ipv4.val.src_ip,
2063 		       sizeof(ib_spec->ipv4.val.src_ip));
2064 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2065 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2066 		       &ib_spec->ipv4.mask.dst_ip,
2067 		       sizeof(ib_spec->ipv4.mask.dst_ip));
2068 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2069 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2070 		       &ib_spec->ipv4.val.dst_ip,
2071 		       sizeof(ib_spec->ipv4.val.dst_ip));
2072 
2073 		set_tos(headers_c, headers_v,
2074 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2075 
2076 		set_proto(headers_c, headers_v,
2077 			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
2078 		break;
2079 	case IB_FLOW_SPEC_IPV6:
2080 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2081 			return -EOPNOTSUPP;
2082 
2083 		if (match_ipv) {
2084 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2085 				 ip_version, 0xf);
2086 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2087 				 ip_version, IPV6_VERSION);
2088 		} else {
2089 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2090 				 ethertype, 0xffff);
2091 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2092 				 ethertype, ETH_P_IPV6);
2093 		}
2094 
2095 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2096 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2097 		       &ib_spec->ipv6.mask.src_ip,
2098 		       sizeof(ib_spec->ipv6.mask.src_ip));
2099 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2100 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
2101 		       &ib_spec->ipv6.val.src_ip,
2102 		       sizeof(ib_spec->ipv6.val.src_ip));
2103 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2104 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2105 		       &ib_spec->ipv6.mask.dst_ip,
2106 		       sizeof(ib_spec->ipv6.mask.dst_ip));
2107 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2108 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2109 		       &ib_spec->ipv6.val.dst_ip,
2110 		       sizeof(ib_spec->ipv6.val.dst_ip));
2111 
2112 		set_tos(headers_c, headers_v,
2113 			ib_spec->ipv6.mask.traffic_class,
2114 			ib_spec->ipv6.val.traffic_class);
2115 
2116 		set_proto(headers_c, headers_v,
2117 			  ib_spec->ipv6.mask.next_hdr,
2118 			  ib_spec->ipv6.val.next_hdr);
2119 
2120 		set_flow_label(misc_params_c, misc_params_v,
2121 			       ntohl(ib_spec->ipv6.mask.flow_label),
2122 			       ntohl(ib_spec->ipv6.val.flow_label),
2123 			       ib_spec->type & IB_FLOW_SPEC_INNER);
2124 
2125 		break;
2126 	case IB_FLOW_SPEC_TCP:
2127 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2128 					 LAST_TCP_UDP_FIELD))
2129 			return -EOPNOTSUPP;
2130 
2131 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2132 			 0xff);
2133 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2134 			 IPPROTO_TCP);
2135 
2136 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2137 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2138 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2139 			 ntohs(ib_spec->tcp_udp.val.src_port));
2140 
2141 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2142 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2143 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2144 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2145 		break;
2146 	case IB_FLOW_SPEC_UDP:
2147 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2148 					 LAST_TCP_UDP_FIELD))
2149 			return -EOPNOTSUPP;
2150 
2151 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2152 			 0xff);
2153 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2154 			 IPPROTO_UDP);
2155 
2156 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2157 			 ntohs(ib_spec->tcp_udp.mask.src_port));
2158 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2159 			 ntohs(ib_spec->tcp_udp.val.src_port));
2160 
2161 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2162 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
2163 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2164 			 ntohs(ib_spec->tcp_udp.val.dst_port));
2165 		break;
2166 	case IB_FLOW_SPEC_VXLAN_TUNNEL:
2167 		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2168 					 LAST_TUNNEL_FIELD))
2169 			return -EOPNOTSUPP;
2170 
2171 		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2172 			 ntohl(ib_spec->tunnel.mask.tunnel_id));
2173 		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2174 			 ntohl(ib_spec->tunnel.val.tunnel_id));
2175 		break;
2176 	case IB_FLOW_SPEC_ACTION_TAG:
2177 		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2178 					 LAST_FLOW_TAG_FIELD))
2179 			return -EOPNOTSUPP;
2180 		if (ib_spec->flow_tag.tag_id >= BIT(24))
2181 			return -EINVAL;
2182 
2183 		*tag_id = ib_spec->flow_tag.tag_id;
2184 		break;
2185 	case IB_FLOW_SPEC_ACTION_DROP:
2186 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2187 					 LAST_DROP_FIELD))
2188 			return -EOPNOTSUPP;
2189 		*is_drop = true;
2190 		break;
2191 	default:
2192 		return -EINVAL;
2193 	}
2194 
2195 	return 0;
2196 }
2197 
2198 /* If a flow could catch both multicast and unicast packets,
2199  * it won't fall into the multicast flow steering table and this rule
2200  * could steal other multicast packets.
2201  */
2202 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
2203 {
2204 	union ib_flow_spec *flow_spec;
2205 
2206 	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
2207 	    ib_attr->num_of_specs < 1)
2208 		return false;
2209 
2210 	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
2211 	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
2212 		struct ib_flow_spec_ipv4 *ipv4_spec;
2213 
2214 		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
2215 		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
2216 			return true;
2217 
2218 		return false;
2219 	}
2220 
2221 	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
2222 		struct ib_flow_spec_eth *eth_spec;
2223 
2224 		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2225 		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2226 		       is_multicast_ether_addr(eth_spec->val.dst_mac);
2227 	}
2228 
2229 	return false;
2230 }
2231 
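/*
 * Verify that an explicit L2 ethertype match is consistent with any
 * IPv4/IPv6 spec in the same flow: ETH_P_IP must pair with an IPv4 spec,
 * ETH_P_IPV6 with an IPv6 spec, and MPLS ethertypes are only accepted when
 * the device can match on ip_version directly.
 */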
2232 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2233 			       const struct ib_flow_attr *flow_attr,
2234 			       bool check_inner)
2235 {
2236 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
2237 	int match_ipv = check_inner ?
2238 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2239 					ft_field_support.inner_ip_version) :
2240 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2241 					ft_field_support.outer_ip_version);
2242 	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
2243 	bool ipv4_spec_valid, ipv6_spec_valid;
2244 	unsigned int ip_spec_type = 0;
2245 	bool has_ethertype = false;
2246 	unsigned int spec_index;
2247 	bool mask_valid = true;
2248 	u16 eth_type = 0;
2249 	bool type_valid;
2250 
2251 	/* Validate that ethertype is correct */
2252 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
2253 		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
2254 		    ib_spec->eth.mask.ether_type) {
2255 			mask_valid = (ib_spec->eth.mask.ether_type ==
2256 				      htons(0xffff));
2257 			has_ethertype = true;
2258 			eth_type = ntohs(ib_spec->eth.val.ether_type);
2259 		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
2260 			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
2261 			ip_spec_type = ib_spec->type;
2262 		}
2263 		ib_spec = (void *)ib_spec + ib_spec->size;
2264 	}
2265 
2266 	type_valid = (!has_ethertype) || (!ip_spec_type);
2267 	if (!type_valid && mask_valid) {
2268 		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
2269 			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
2270 		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
2271 			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
2272 
2273 		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
2274 			     (((eth_type == ETH_P_MPLS_UC) ||
2275 			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
2276 	}
2277 
2278 	return type_valid;
2279 }
2280 
2281 static bool is_valid_attr(struct mlx5_core_dev *mdev,
2282 			  const struct ib_flow_attr *flow_attr)
2283 {
2284 	return is_valid_ethertype(mdev, flow_attr, false) &&
2285 	       is_valid_ethertype(mdev, flow_attr, true);
2286 }
2287 
2288 static void put_flow_table(struct mlx5_ib_dev *dev,
2289 			   struct mlx5_ib_flow_prio *prio, bool ft_added)
2290 {
2291 	prio->refcount -= !!ft_added;
2292 	if (!prio->refcount) {
2293 		mlx5_destroy_flow_table(prio->flow_table);
2294 		prio->flow_table = NULL;
2295 	}
2296 }
2297 
2298 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2299 {
2300 	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
2301 	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2302 							  struct mlx5_ib_flow_handler,
2303 							  ibflow);
2304 	struct mlx5_ib_flow_handler *iter, *tmp;
2305 
2306 	mutex_lock(&dev->flow_db.lock);
2307 
2308 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
2309 		mlx5_del_flow_rules(iter->rule);
2310 		put_flow_table(dev, iter->prio, true);
2311 		list_del(&iter->list);
2312 		kfree(iter);
2313 	}
2314 
2315 	mlx5_del_flow_rules(handler->rule);
2316 	put_flow_table(dev, handler->prio, true);
2317 	mutex_unlock(&dev->flow_db.lock);
2318 
2319 	kfree(handler);
2320 
2321 	return 0;
2322 }
2323 
2324 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
2325 {
2326 	priority *= 2;
2327 	if (!dont_trap)
2328 		priority++;
2329 	return priority;
2330 }
2331 
2332 enum flow_table_type {
2333 	MLX5_IB_FT_RX,
2334 	MLX5_IB_FT_TX
2335 };
2336 
2337 #define MLX5_FS_MAX_TYPES	 6
2338 #define MLX5_FS_MAX_ENTRIES	 BIT(16)
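/*
 * Pick (and lazily create) the flow table that will hold the rule: NORMAL
 * attributes land in the bypass namespace at a priority derived from the
 * attribute, leftovers (ALL/MC default) use the leftovers namespace, and
 * sniffer rules use the per-direction sniffer tables.
 */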
2339 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
2340 						struct ib_flow_attr *flow_attr,
2341 						enum flow_table_type ft_type)
2342 {
2343 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
2344 	struct mlx5_flow_namespace *ns = NULL;
2345 	struct mlx5_ib_flow_prio *prio;
2346 	struct mlx5_flow_table *ft;
2347 	int max_table_size;
2348 	int num_entries;
2349 	int num_groups;
2350 	int priority;
2351 	int err = 0;
2352 
2353 	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2354 						       log_max_ft_size));
2355 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
2356 		if (flow_is_multicast_only(flow_attr) &&
2357 		    !dont_trap)
2358 			priority = MLX5_IB_FLOW_MCAST_PRIO;
2359 		else
2360 			priority = ib_prio_to_core_prio(flow_attr->priority,
2361 							dont_trap);
2362 		ns = mlx5_get_flow_namespace(dev->mdev,
2363 					     MLX5_FLOW_NAMESPACE_BYPASS);
2364 		num_entries = MLX5_FS_MAX_ENTRIES;
2365 		num_groups = MLX5_FS_MAX_TYPES;
2366 		prio = &dev->flow_db.prios[priority];
2367 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2368 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2369 		ns = mlx5_get_flow_namespace(dev->mdev,
2370 					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
2371 		build_leftovers_ft_param(&priority,
2372 					 &num_entries,
2373 					 &num_groups);
2374 		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
2375 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2376 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
2377 					allow_sniffer_and_nic_rx_shared_tir))
2378 			return ERR_PTR(-ENOTSUPP);
2379 
2380 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
2381 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
2382 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
2383 
2384 		prio = &dev->flow_db.sniffer[ft_type];
2385 		priority = 0;
2386 		num_entries = 1;
2387 		num_groups = 1;
2388 	}
2389 
2390 	if (!ns)
2391 		return ERR_PTR(-ENOTSUPP);
2392 
2393 	if (num_entries > max_table_size)
2394 		return ERR_PTR(-ENOMEM);
2395 
2396 	ft = prio->flow_table;
2397 	if (!ft) {
2398 		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
2399 							 num_entries,
2400 							 num_groups,
2401 							 0, 0);
2402 
2403 		if (!IS_ERR(ft)) {
2404 			prio->refcount = 0;
2405 			prio->flow_table = ft;
2406 		} else {
2407 			err = PTR_ERR(ft);
2408 		}
2409 	}
2410 
2411 	return err ? ERR_PTR(err) : prio;
2412 }
2413 
2414 static void set_underlay_qp(struct mlx5_ib_dev *dev,
2415 			    struct mlx5_flow_spec *spec,
2416 			    u32 underlay_qpn)
2417 {
2418 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
2419 					   spec->match_criteria,
2420 					   misc_parameters);
2421 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2422 					   misc_parameters);
2423 
2424 	if (underlay_qpn &&
2425 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2426 				      ft_field_support.bth_dst_qp)) {
2427 		MLX5_SET(fte_match_set_misc,
2428 			 misc_params_v, bth_dst_qp, underlay_qpn);
2429 		MLX5_SET(fte_match_set_misc,
2430 			 misc_params_c, bth_dst_qp, 0xffffff);
2431 	}
2432 }
2433 
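/*
 * Create a single steering rule from an ib_flow_attr: parse every spec into
 * a mlx5_flow_spec, optionally match on the underlay QPN, and add the rule
 * with a forward action (or a drop action when an IB_FLOW_SPEC_ACTION_DROP
 * spec was present).
 */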
2434 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2435 						      struct mlx5_ib_flow_prio *ft_prio,
2436 						      const struct ib_flow_attr *flow_attr,
2437 						      struct mlx5_flow_destination *dst,
2438 						      u32 underlay_qpn)
2439 {
2440 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
2441 	struct mlx5_ib_flow_handler *handler;
2442 	struct mlx5_flow_act flow_act = {0};
2443 	struct mlx5_flow_spec *spec;
2444 	struct mlx5_flow_destination *rule_dst = dst;
2445 	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
2446 	unsigned int spec_index;
2447 	u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2448 	bool is_drop = false;
2449 	int err = 0;
2450 	int dest_num = 1;
2451 
2452 	if (!is_valid_attr(dev->mdev, flow_attr))
2453 		return ERR_PTR(-EINVAL);
2454 
2455 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2456 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2457 	if (!handler || !spec) {
2458 		err = -ENOMEM;
2459 		goto free;
2460 	}
2461 
2462 	INIT_LIST_HEAD(&handler->list);
2463 
2464 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
2465 		err = parse_flow_attr(dev->mdev, spec->match_criteria,
2466 				      spec->match_value,
2467 				      ib_flow, &flow_tag, &is_drop);
2468 		if (err < 0)
2469 			goto free;
2470 
2471 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
2472 	}
2473 
2474 	if (!flow_is_multicast_only(flow_attr))
2475 		set_underlay_qp(dev, spec, underlay_qpn);
2476 
2477 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
2478 	if (is_drop) {
2479 		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2480 		rule_dst = NULL;
2481 		dest_num = 0;
2482 	} else {
2483 		flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
2484 		    MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
2485 	}
2486 
2487 	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
2488 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2489 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
2490 		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x are not allowed in leftovers\n",
2491 			     flow_tag, flow_attr->type);
2492 		err = -EINVAL;
2493 		goto free;
2494 	}
2495 	flow_act.flow_tag = flow_tag;
2496 	handler->rule = mlx5_add_flow_rules(ft, spec,
2497 					    &flow_act,
2498 					    rule_dst, dest_num);
2499 
2500 	if (IS_ERR(handler->rule)) {
2501 		err = PTR_ERR(handler->rule);
2502 		goto free;
2503 	}
2504 
2505 	ft_prio->refcount++;
2506 	handler->prio = ft_prio;
2507 
2508 	ft_prio->flow_table = ft;
2509 free:
2510 	if (err)
2511 		kfree(handler);
2512 	kvfree(spec);
2513 	return err ? ERR_PTR(err) : handler;
2514 }
2515 
2516 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
2517 						     struct mlx5_ib_flow_prio *ft_prio,
2518 						     const struct ib_flow_attr *flow_attr,
2519 						     struct mlx5_flow_destination *dst)
2520 {
2521 	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
2522 }
2523 
2524 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
2525 							  struct mlx5_ib_flow_prio *ft_prio,
2526 							  struct ib_flow_attr *flow_attr,
2527 							  struct mlx5_flow_destination *dst)
2528 {
2529 	struct mlx5_ib_flow_handler *handler_dst = NULL;
2530 	struct mlx5_ib_flow_handler *handler = NULL;
2531 
2532 	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
2533 	if (!IS_ERR(handler)) {
2534 		handler_dst = create_flow_rule(dev, ft_prio,
2535 					       flow_attr, dst);
2536 		if (IS_ERR(handler_dst)) {
2537 			mlx5_del_flow_rules(handler->rule);
2538 			ft_prio->refcount--;
2539 			kfree(handler);
2540 			handler = handler_dst;
2541 		} else {
2542 			list_add(&handler_dst->list, &handler->list);
2543 		}
2544 	}
2545 
2546 	return handler;
2547 }
2548 enum {
2549 	LEFTOVERS_MC,
2550 	LEFTOVERS_UC,
2551 };
2552 
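/*
 * Leftovers rules catch packets that did not match any other steering rule:
 * a multicast catch-all is always installed, and for IB_FLOW_ATTR_ALL_DEFAULT
 * a unicast catch-all is chained to it.
 */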
2553 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
2554 							  struct mlx5_ib_flow_prio *ft_prio,
2555 							  struct ib_flow_attr *flow_attr,
2556 							  struct mlx5_flow_destination *dst)
2557 {
2558 	struct mlx5_ib_flow_handler *handler_ucast = NULL;
2559 	struct mlx5_ib_flow_handler *handler = NULL;
2560 
2561 	static struct {
2562 		struct ib_flow_attr	flow_attr;
2563 		struct ib_flow_spec_eth eth_flow;
2564 	} leftovers_specs[] = {
2565 		[LEFTOVERS_MC] = {
2566 			.flow_attr = {
2567 				.num_of_specs = 1,
2568 				.size = sizeof(leftovers_specs[0])
2569 			},
2570 			.eth_flow = {
2571 				.type = IB_FLOW_SPEC_ETH,
2572 				.size = sizeof(struct ib_flow_spec_eth),
2573 				.mask = {.dst_mac = {0x1} },
2574 				.val =  {.dst_mac = {0x1} }
2575 			}
2576 		},
2577 		[LEFTOVERS_UC] = {
2578 			.flow_attr = {
2579 				.num_of_specs = 1,
2580 				.size = sizeof(leftovers_specs[0])
2581 			},
2582 			.eth_flow = {
2583 				.type = IB_FLOW_SPEC_ETH,
2584 				.size = sizeof(struct ib_flow_spec_eth),
2585 				.mask = {.dst_mac = {0x1} },
2586 				.val = {.dst_mac = {} }
2587 			}
2588 		}
2589 	};
2590 
2591 	handler = create_flow_rule(dev, ft_prio,
2592 				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
2593 				   dst);
2594 	if (!IS_ERR(handler) &&
2595 	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
2596 		handler_ucast = create_flow_rule(dev, ft_prio,
2597 						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
2598 						 dst);
2599 		if (IS_ERR(handler_ucast)) {
2600 			mlx5_del_flow_rules(handler->rule);
2601 			ft_prio->refcount--;
2602 			kfree(handler);
2603 			handler = handler_ucast;
2604 		} else {
2605 			list_add(&handler_ucast->list, &handler->list);
2606 		}
2607 	}
2608 
2609 	return handler;
2610 }
2611 
2612 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
2613 							struct mlx5_ib_flow_prio *ft_rx,
2614 							struct mlx5_ib_flow_prio *ft_tx,
2615 							struct mlx5_flow_destination *dst)
2616 {
2617 	struct mlx5_ib_flow_handler *handler_rx;
2618 	struct mlx5_ib_flow_handler *handler_tx;
2619 	int err;
2620 	static const struct ib_flow_attr flow_attr  = {
2621 		.num_of_specs = 0,
2622 		.size = sizeof(flow_attr)
2623 	};
2624 
2625 	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
2626 	if (IS_ERR(handler_rx)) {
2627 		err = PTR_ERR(handler_rx);
2628 		goto err;
2629 	}
2630 
2631 	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
2632 	if (IS_ERR(handler_tx)) {
2633 		err = PTR_ERR(handler_tx);
2634 		goto err_tx;
2635 	}
2636 
2637 	list_add(&handler_tx->list, &handler_rx->list);
2638 
2639 	return handler_rx;
2640 
2641 err_tx:
2642 	mlx5_del_flow_rules(handler_rx->rule);
2643 	ft_rx->refcount--;
2644 	kfree(handler_rx);
2645 err:
2646 	return ERR_PTR(err);
2647 }
2648 
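/*
 * Verbs entry point for flow creation: validate the attribute, take the
 * flow_db lock, resolve the destination TIR from the QP, and dispatch to
 * the normal/don't-trap, leftovers or sniffer rule builders.
 */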
2649 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
2650 					   struct ib_flow_attr *flow_attr,
2651 					   int domain)
2652 {
2653 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
2654 	struct mlx5_ib_qp *mqp = to_mqp(qp);
2655 	struct mlx5_ib_flow_handler *handler = NULL;
2656 	struct mlx5_flow_destination *dst = NULL;
2657 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
2658 	struct mlx5_ib_flow_prio *ft_prio;
2659 	int err;
2660 	int underlay_qpn;
2661 
2662 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
2663 		return ERR_PTR(-ENOMEM);
2664 
2665 	if (domain != IB_FLOW_DOMAIN_USER ||
2666 	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
2667 	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
2668 		return ERR_PTR(-EINVAL);
2669 
2670 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
2671 	if (!dst)
2672 		return ERR_PTR(-ENOMEM);
2673 
2674 	mutex_lock(&dev->flow_db.lock);
2675 
2676 	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
2677 	if (IS_ERR(ft_prio)) {
2678 		err = PTR_ERR(ft_prio);
2679 		goto unlock;
2680 	}
2681 	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2682 		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
2683 		if (IS_ERR(ft_prio_tx)) {
2684 			err = PTR_ERR(ft_prio_tx);
2685 			ft_prio_tx = NULL;
2686 			goto destroy_ft;
2687 		}
2688 	}
2689 
2690 	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
2691 	if (mqp->flags & MLX5_IB_QP_RSS)
2692 		dst->tir_num = mqp->rss_qp.tirn;
2693 	else
2694 		dst->tir_num = mqp->raw_packet_qp.rq.tirn;
2695 
2696 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
2697 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
2698 			handler = create_dont_trap_rule(dev, ft_prio,
2699 							flow_attr, dst);
2700 		} else {
2701 			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
2702 					mqp->underlay_qpn : 0;
2703 			handler = _create_flow_rule(dev, ft_prio, flow_attr,
2704 						    dst, underlay_qpn);
2705 		}
2706 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2707 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2708 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
2709 						dst);
2710 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2711 		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
2712 	} else {
2713 		err = -EINVAL;
2714 		goto destroy_ft;
2715 	}
2716 
2717 	if (IS_ERR(handler)) {
2718 		err = PTR_ERR(handler);
2719 		handler = NULL;
2720 		goto destroy_ft;
2721 	}
2722 
2723 	mutex_unlock(&dev->flow_db.lock);
2724 	kfree(dst);
2725 
2726 	return &handler->ibflow;
2727 
2728 destroy_ft:
2729 	put_flow_table(dev, ft_prio, false);
2730 	if (ft_prio_tx)
2731 		put_flow_table(dev, ft_prio_tx, false);
2732 unlock:
2733 	mutex_unlock(&dev->flow_db.lock);
2734 	kfree(dst);
2735 	kfree(handler);
2736 	return ERR_PTR(err);
2737 }
2738 
2739 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2740 {
2741 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2742 	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2743 	int err;
2744 
2745 	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
2746 		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
2747 		return -EOPNOTSUPP;
2748 	}
2749 
2750 	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
2751 	if (err)
2752 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2753 			     ibqp->qp_num, gid->raw);
2754 
2755 	return err;
2756 }
2757 
2758 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2759 {
2760 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2761 	int err;
2762 
2763 	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
2764 	if (err)
2765 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2766 			     ibqp->qp_num, gid->raw);
2767 
2768 	return err;
2769 }
2770 
2771 static int init_node_data(struct mlx5_ib_dev *dev)
2772 {
2773 	int err;
2774 
2775 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2776 	if (err)
2777 		return err;
2778 
2779 	dev->mdev->rev_id = dev->mdev->pdev->revision;
2780 
2781 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2782 }
2783 
2784 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
2785 			     char *buf)
2786 {
2787 	struct mlx5_ib_dev *dev =
2788 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2789 
2790 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
2791 }
2792 
2793 static ssize_t show_reg_pages(struct device *device,
2794 			      struct device_attribute *attr, char *buf)
2795 {
2796 	struct mlx5_ib_dev *dev =
2797 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2798 
2799 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2800 }
2801 
2802 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2803 			char *buf)
2804 {
2805 	struct mlx5_ib_dev *dev =
2806 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2807 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
2808 }
2809 
2810 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2811 			char *buf)
2812 {
2813 	struct mlx5_ib_dev *dev =
2814 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2815 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
2816 }
2817 
2818 static ssize_t show_board(struct device *device, struct device_attribute *attr,
2819 			  char *buf)
2820 {
2821 	struct mlx5_ib_dev *dev =
2822 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2823 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2824 		       dev->mdev->board_id);
2825 }
2826 
2827 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
2828 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
2829 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
2830 static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
2831 static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
2832 
2833 static struct device_attribute *mlx5_class_attributes[] = {
2834 	&dev_attr_hw_rev,
2835 	&dev_attr_hca_type,
2836 	&dev_attr_board_id,
2837 	&dev_attr_fw_pages,
2838 	&dev_attr_reg_pages,
2839 };
2840 
2841 static void pkey_change_handler(struct work_struct *work)
2842 {
2843 	struct mlx5_ib_port_resources *ports =
2844 		container_of(work, struct mlx5_ib_port_resources,
2845 			     pkey_change_work);
2846 
2847 	mutex_lock(&ports->devr->mutex);
2848 	mlx5_ib_gsi_pkey_change(ports->gsi);
2849 	mutex_unlock(&ports->devr->mutex);
2850 }
2851 
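/*
 * On a fatal device error, walk all QPs on this ibdev and, for every CQ
 * that still has outstanding send or receive work, queue it once and then
 * invoke its completion handler so consumers can reap the flushed
 * completions.
 */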
2852 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2853 {
2854 	struct mlx5_ib_qp *mqp;
2855 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2856 	struct mlx5_core_cq *mcq;
2857 	struct list_head cq_armed_list;
2858 	unsigned long flags_qp;
2859 	unsigned long flags_cq;
2860 	unsigned long flags;
2861 
2862 	INIT_LIST_HEAD(&cq_armed_list);
2863 
2864 	/* Go over the QP list residing on that ibdev, synchronized with QP create/destroy. */
2865 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2866 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2867 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2868 		if (mqp->sq.tail != mqp->sq.head) {
2869 			send_mcq = to_mcq(mqp->ibqp.send_cq);
2870 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2871 			if (send_mcq->mcq.comp &&
2872 			    mqp->ibqp.send_cq->comp_handler) {
2873 				if (!send_mcq->mcq.reset_notify_added) {
2874 					send_mcq->mcq.reset_notify_added = 1;
2875 					list_add_tail(&send_mcq->mcq.reset_notify,
2876 						      &cq_armed_list);
2877 				}
2878 			}
2879 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2880 		}
2881 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2882 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2883 		/* no handling is needed for SRQ */
2884 		if (!mqp->ibqp.srq) {
2885 			if (mqp->rq.tail != mqp->rq.head) {
2886 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2887 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2888 				if (recv_mcq->mcq.comp &&
2889 				    mqp->ibqp.recv_cq->comp_handler) {
2890 					if (!recv_mcq->mcq.reset_notify_added) {
2891 						recv_mcq->mcq.reset_notify_added = 1;
2892 						list_add_tail(&recv_mcq->mcq.reset_notify,
2893 							      &cq_armed_list);
2894 					}
2895 				}
2896 				spin_unlock_irqrestore(&recv_mcq->lock,
2897 						       flags_cq);
2898 			}
2899 		}
2900 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2901 	}
2902 	/* At this point, all in-flight post-send operations have been caught
2903 	 * by the lock/unlock of the locks above.  Now arm all involved CQs.
2904 	 */
2905 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2906 		mcq->comp(mcq);
2907 	}
2908 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2909 }
2910 
2911 static void delay_drop_handler(struct work_struct *work)
2912 {
2913 	int err;
2914 	struct mlx5_ib_delay_drop *delay_drop =
2915 		container_of(work, struct mlx5_ib_delay_drop,
2916 			     delay_drop_work);
2917 
2918 	atomic_inc(&delay_drop->events_cnt);
2919 
2920 	mutex_lock(&delay_drop->lock);
2921 	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
2922 				       delay_drop->timeout);
2923 	if (err) {
2924 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
2925 			     delay_drop->timeout);
2926 		delay_drop->activate = false;
2927 	}
2928 	mutex_unlock(&delay_drop->lock);
2929 }
2930 
2931 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
2932 			  enum mlx5_dev_event event, unsigned long param)
2933 {
2934 	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
2935 	struct ib_event ibev;
2936 	bool fatal = false;
2937 	u8 port = 0;
2938 
2939 	switch (event) {
2940 	case MLX5_DEV_EVENT_SYS_ERROR:
2941 		ibev.event = IB_EVENT_DEVICE_FATAL;
2942 		mlx5_ib_handle_internal_error(ibdev);
2943 		fatal = true;
2944 		break;
2945 
2946 	case MLX5_DEV_EVENT_PORT_UP:
2947 	case MLX5_DEV_EVENT_PORT_DOWN:
2948 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
2949 		port = (u8)param;
2950 
2951 		/* In RoCE, port up/down events are handled in
2952 		 * mlx5_netdev_event().
2953 		 */
2954 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2955 			IB_LINK_LAYER_ETHERNET)
2956 			return;
2957 
2958 		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
2959 			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2960 		break;
2961 
2962 	case MLX5_DEV_EVENT_LID_CHANGE:
2963 		ibev.event = IB_EVENT_LID_CHANGE;
2964 		port = (u8)param;
2965 		break;
2966 
2967 	case MLX5_DEV_EVENT_PKEY_CHANGE:
2968 		ibev.event = IB_EVENT_PKEY_CHANGE;
2969 		port = (u8)param;
2970 
2971 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2972 		break;
2973 
2974 	case MLX5_DEV_EVENT_GUID_CHANGE:
2975 		ibev.event = IB_EVENT_GID_CHANGE;
2976 		port = (u8)param;
2977 		break;
2978 
2979 	case MLX5_DEV_EVENT_CLIENT_REREG:
2980 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
2981 		port = (u8)param;
2982 		break;
2983 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
2984 		schedule_work(&ibdev->delay_drop.delay_drop_work);
2985 		goto out;
2986 	default:
2987 		goto out;
2988 	}
2989 
2990 	ibev.device	      = &ibdev->ib_dev;
2991 	ibev.element.port_num = port;
2992 
2993 	if (port < 1 || port > ibdev->num_ports) {
2994 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
2995 		goto out;
2996 	}
2997 
2998 	if (ibdev->ib_active)
2999 		ib_dispatch_event(&ibev);
3000 
3001 	if (fatal)
3002 		ibdev->ib_active = false;
3003 
3004 out:
3005 	return;
3006 }
3007 
3008 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
3009 {
3010 	struct mlx5_hca_vport_context vport_ctx;
3011 	int err;
3012 	int port;
3013 
3014 	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
3015 		dev->mdev->port_caps[port - 1].has_smi = false;
3016 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
3017 		    MLX5_CAP_PORT_TYPE_IB) {
3018 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
3019 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
3020 								   port, 0,
3021 								   &vport_ctx);
3022 				if (err) {
3023 					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
3024 						    port, err);
3025 					return err;
3026 				}
3027 				dev->mdev->port_caps[port - 1].has_smi =
3028 					vport_ctx.has_smi;
3029 			} else {
3030 				dev->mdev->port_caps[port - 1].has_smi = true;
3031 			}
3032 		}
3033 	}
3034 	return 0;
3035 }
3036 
3037 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
3038 {
3039 	int port;
3040 
3041 	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
3042 		mlx5_query_ext_port_caps(dev, port);
3043 }
3044 
3045 static int get_port_caps(struct mlx5_ib_dev *dev)
3046 {
3047 	struct ib_device_attr *dprops = NULL;
3048 	struct ib_port_attr *pprops = NULL;
3049 	int err = -ENOMEM;
3050 	int port;
3051 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
3052 
3053 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
3054 	if (!pprops)
3055 		goto out;
3056 
3057 	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
3058 	if (!dprops)
3059 		goto out;
3060 
3061 	err = set_has_smi_cap(dev);
3062 	if (err)
3063 		goto out;
3064 
3065 	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
3066 	if (err) {
3067 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
3068 		goto out;
3069 	}
3070 
3071 	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
3072 		memset(pprops, 0, sizeof(*pprops));
3073 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
3074 		if (err) {
3075 			mlx5_ib_warn(dev, "query_port %d failed %d\n",
3076 				     port, err);
3077 			break;
3078 		}
3079 		dev->mdev->port_caps[port - 1].pkey_table_len =
3080 						dprops->max_pkeys;
3081 		dev->mdev->port_caps[port - 1].gid_table_len =
3082 						pprops->gid_tbl_len;
3083 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
3084 			    dprops->max_pkeys, pprops->gid_tbl_len);
3085 	}
3086 
3087 out:
3088 	kfree(pprops);
3089 	kfree(dprops);
3090 
3091 	return err;
3092 }
3093 
3094 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
3095 {
3096 	int err;
3097 
3098 	err = mlx5_mr_cache_cleanup(dev);
3099 	if (err)
3100 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
3101 
3102 	mlx5_ib_destroy_qp(dev->umrc.qp);
3103 	ib_free_cq(dev->umrc.cq);
3104 	ib_dealloc_pd(dev->umrc.pd);
3105 }
3106 
3107 enum {
3108 	MAX_UMR_WR = 128,
3109 };
3110 
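/*
 * Set up the resources used for UMR (user memory registration) work
 * requests: a dedicated PD, a soft-IRQ polled CQ and an MLX5_IB_QPT_REG_UMR
 * QP that is moved through INIT/RTR/RTS here, plus the MR cache and a
 * semaphore limiting outstanding UMR WRs.
 */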
3111 static int create_umr_res(struct mlx5_ib_dev *dev)
3112 {
3113 	struct ib_qp_init_attr *init_attr = NULL;
3114 	struct ib_qp_attr *attr = NULL;
3115 	struct ib_pd *pd;
3116 	struct ib_cq *cq;
3117 	struct ib_qp *qp;
3118 	int ret;
3119 
3120 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
3121 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
3122 	if (!attr || !init_attr) {
3123 		ret = -ENOMEM;
3124 		goto error_0;
3125 	}
3126 
3127 	pd = ib_alloc_pd(&dev->ib_dev, 0);
3128 	if (IS_ERR(pd)) {
3129 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
3130 		ret = PTR_ERR(pd);
3131 		goto error_0;
3132 	}
3133 
3134 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
3135 	if (IS_ERR(cq)) {
3136 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
3137 		ret = PTR_ERR(cq);
3138 		goto error_2;
3139 	}
3140 
3141 	init_attr->send_cq = cq;
3142 	init_attr->recv_cq = cq;
3143 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
3144 	init_attr->cap.max_send_wr = MAX_UMR_WR;
3145 	init_attr->cap.max_send_sge = 1;
3146 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
3147 	init_attr->port_num = 1;
3148 	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
3149 	if (IS_ERR(qp)) {
3150 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
3151 		ret = PTR_ERR(qp);
3152 		goto error_3;
3153 	}
3154 	qp->device     = &dev->ib_dev;
3155 	qp->real_qp    = qp;
3156 	qp->uobject    = NULL;
3157 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
3158 	qp->send_cq    = init_attr->send_cq;
3159 	qp->recv_cq    = init_attr->recv_cq;
3160 
3161 	attr->qp_state = IB_QPS_INIT;
3162 	attr->port_num = 1;
3163 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
3164 				IB_QP_PORT, NULL);
3165 	if (ret) {
3166 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
3167 		goto error_4;
3168 	}
3169 
3170 	memset(attr, 0, sizeof(*attr));
3171 	attr->qp_state = IB_QPS_RTR;
3172 	attr->path_mtu = IB_MTU_256;
3173 
3174 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
3175 	if (ret) {
3176 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
3177 		goto error_4;
3178 	}
3179 
3180 	memset(attr, 0, sizeof(*attr));
3181 	attr->qp_state = IB_QPS_RTS;
3182 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
3183 	if (ret) {
3184 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
3185 		goto error_4;
3186 	}
3187 
3188 	dev->umrc.qp = qp;
3189 	dev->umrc.cq = cq;
3190 	dev->umrc.pd = pd;
3191 
3192 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
3193 	ret = mlx5_mr_cache_init(dev);
3194 	if (ret) {
3195 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
3196 		goto error_4;
3197 	}
3198 
3199 	kfree(attr);
3200 	kfree(init_attr);
3201 
3202 	return 0;
3203 
3204 error_4:
3205 	mlx5_ib_destroy_qp(qp);
3206 
3207 error_3:
3208 	ib_free_cq(cq);
3209 
3210 error_2:
3211 	ib_dealloc_pd(pd);
3212 
3213 error_0:
3214 	kfree(attr);
3215 	kfree(init_attr);
3216 	return ret;
3217 }
3218 
3219 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
3220 {
3221 	switch (umr_fence_cap) {
3222 	case MLX5_CAP_UMR_FENCE_NONE:
3223 		return MLX5_FENCE_MODE_NONE;
3224 	case MLX5_CAP_UMR_FENCE_SMALL:
3225 		return MLX5_FENCE_MODE_INITIATOR_SMALL;
3226 	default:
3227 		return MLX5_FENCE_MODE_STRONG_ORDERING;
3228 	}
3229 }
3230 
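/*
 * Allocate the device-wide verbs resources shared by kernel consumers:
 * PD p0, CQ c0, two XRC domains, an XRC SRQ (s0), a basic SRQ (s1), and
 * the per-port P_Key change work items.
 */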
3231 static int create_dev_resources(struct mlx5_ib_resources *devr)
3232 {
3233 	struct ib_srq_init_attr attr;
3234 	struct mlx5_ib_dev *dev;
3235 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
3236 	int port;
3237 	int ret = 0;
3238 
3239 	dev = container_of(devr, struct mlx5_ib_dev, devr);
3240 
3241 	mutex_init(&devr->mutex);
3242 
3243 	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
3244 	if (IS_ERR(devr->p0)) {
3245 		ret = PTR_ERR(devr->p0);
3246 		goto error0;
3247 	}
3248 	devr->p0->device  = &dev->ib_dev;
3249 	devr->p0->uobject = NULL;
3250 	atomic_set(&devr->p0->usecnt, 0);
3251 
3252 	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
3253 	if (IS_ERR(devr->c0)) {
3254 		ret = PTR_ERR(devr->c0);
3255 		goto error1;
3256 	}
3257 	devr->c0->device        = &dev->ib_dev;
3258 	devr->c0->uobject       = NULL;
3259 	devr->c0->comp_handler  = NULL;
3260 	devr->c0->event_handler = NULL;
3261 	devr->c0->cq_context    = NULL;
3262 	atomic_set(&devr->c0->usecnt, 0);
3263 
3264 	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
3265 	if (IS_ERR(devr->x0)) {
3266 		ret = PTR_ERR(devr->x0);
3267 		goto error2;
3268 	}
3269 	devr->x0->device = &dev->ib_dev;
3270 	devr->x0->inode = NULL;
3271 	atomic_set(&devr->x0->usecnt, 0);
3272 	mutex_init(&devr->x0->tgt_qp_mutex);
3273 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
3274 
3275 	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
3276 	if (IS_ERR(devr->x1)) {
3277 		ret = PTR_ERR(devr->x1);
3278 		goto error3;
3279 	}
3280 	devr->x1->device = &dev->ib_dev;
3281 	devr->x1->inode = NULL;
3282 	atomic_set(&devr->x1->usecnt, 0);
3283 	mutex_init(&devr->x1->tgt_qp_mutex);
3284 	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
3285 
3286 	memset(&attr, 0, sizeof(attr));
3287 	attr.attr.max_sge = 1;
3288 	attr.attr.max_wr = 1;
3289 	attr.srq_type = IB_SRQT_XRC;
3290 	attr.ext.cq = devr->c0;
3291 	attr.ext.xrc.xrcd = devr->x0;
3292 
3293 	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
3294 	if (IS_ERR(devr->s0)) {
3295 		ret = PTR_ERR(devr->s0);
3296 		goto error4;
3297 	}
3298 	devr->s0->device	= &dev->ib_dev;
3299 	devr->s0->pd		= devr->p0;
3300 	devr->s0->uobject       = NULL;
3301 	devr->s0->event_handler = NULL;
3302 	devr->s0->srq_context   = NULL;
3303 	devr->s0->srq_type      = IB_SRQT_XRC;
3304 	devr->s0->ext.xrc.xrcd	= devr->x0;
3305 	devr->s0->ext.cq	= devr->c0;
3306 	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
3307 	atomic_inc(&devr->s0->ext.cq->usecnt);
3308 	atomic_inc(&devr->p0->usecnt);
3309 	atomic_set(&devr->s0->usecnt, 0);
3310 
3311 	memset(&attr, 0, sizeof(attr));
3312 	attr.attr.max_sge = 1;
3313 	attr.attr.max_wr = 1;
3314 	attr.srq_type = IB_SRQT_BASIC;
3315 	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
3316 	if (IS_ERR(devr->s1)) {
3317 		ret = PTR_ERR(devr->s1);
3318 		goto error5;
3319 	}
3320 	devr->s1->device	= &dev->ib_dev;
3321 	devr->s1->pd		= devr->p0;
3322 	devr->s1->uobject       = NULL;
3323 	devr->s1->event_handler = NULL;
3324 	devr->s1->srq_context   = NULL;
3325 	devr->s1->srq_type      = IB_SRQT_BASIC;
3326 	devr->s1->ext.cq	= devr->c0;
3327 	atomic_inc(&devr->p0->usecnt);
3328 	atomic_set(&devr->s1->usecnt, 0);
3329 
3330 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
3331 		INIT_WORK(&devr->ports[port].pkey_change_work,
3332 			  pkey_change_handler);
3333 		devr->ports[port].devr = devr;
3334 	}
3335 
3336 	return 0;
3337 
3338 error5:
3339 	mlx5_ib_destroy_srq(devr->s0);
3340 error4:
3341 	mlx5_ib_dealloc_xrcd(devr->x1);
3342 error3:
3343 	mlx5_ib_dealloc_xrcd(devr->x0);
3344 error2:
3345 	mlx5_ib_destroy_cq(devr->c0);
3346 error1:
3347 	mlx5_ib_dealloc_pd(devr->p0);
3348 error0:
3349 	return ret;
3350 }
3351 
3352 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
3353 {
3354 	struct mlx5_ib_dev *dev =
3355 		container_of(devr, struct mlx5_ib_dev, devr);
3356 	int port;
3357 
3358 	mlx5_ib_destroy_srq(devr->s1);
3359 	mlx5_ib_destroy_srq(devr->s0);
3360 	mlx5_ib_dealloc_xrcd(devr->x0);
3361 	mlx5_ib_dealloc_xrcd(devr->x1);
3362 	mlx5_ib_destroy_cq(devr->c0);
3363 	mlx5_ib_dealloc_pd(devr->p0);
3364 
3365 	/* Make sure no P_Key change work items are still executing */
3366 	for (port = 0; port < dev->num_ports; ++port)
3367 		cancel_work_sync(&devr->ports[port].pkey_change_work);
3368 }
3369 
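/*
 * Derive the rdma core port capability flags from the device caps: plain IB
 * on an InfiniBand link layer, otherwise raw packet plus the RoCE v1/v2 bits
 * when both IPv4 and IPv6 L3 types and the respective RoCE versions are
 * supported.
 */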
3370 static u32 get_core_cap_flags(struct ib_device *ibdev)
3371 {
3372 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3373 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
3374 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
3375 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
3376 	u32 ret = 0;
3377 
3378 	if (ll == IB_LINK_LAYER_INFINIBAND)
3379 		return RDMA_CORE_PORT_IBA_IB;
3380 
3381 	ret = RDMA_CORE_PORT_RAW_PACKET;
3382 
3383 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
3384 		return ret;
3385 
3386 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
3387 		return ret;
3388 
3389 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
3390 		ret |= RDMA_CORE_PORT_IBA_ROCE;
3391 
3392 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
3393 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3394 
3395 	return ret;
3396 }
3397 
3398 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
3399 			       struct ib_port_immutable *immutable)
3400 {
3401 	struct ib_port_attr attr;
3402 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3403 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
3404 	int err;
3405 
3406 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
3407 
3408 	err = ib_query_port(ibdev, port_num, &attr);
3409 	if (err)
3410 		return err;
3411 
3412 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3413 	immutable->gid_tbl_len = attr.gid_tbl_len;
3414 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
3415 	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
3416 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3417 
3418 	return 0;
3419 }
3420 
3421 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3422 {
3423 	struct mlx5_ib_dev *dev =
3424 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3425 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3426 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3427 		 fw_rev_sub(dev->mdev));
3428 }
3429 
3430 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
3431 {
3432 	struct mlx5_core_dev *mdev = dev->mdev;
3433 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
3434 								 MLX5_FLOW_NAMESPACE_LAG);
3435 	struct mlx5_flow_table *ft;
3436 	int err;
3437 
3438 	if (!ns || !mlx5_lag_is_active(mdev))
3439 		return 0;
3440 
3441 	err = mlx5_cmd_create_vport_lag(mdev);
3442 	if (err)
3443 		return err;
3444 
3445 	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
3446 	if (IS_ERR(ft)) {
3447 		err = PTR_ERR(ft);
3448 		goto err_destroy_vport_lag;
3449 	}
3450 
3451 	dev->flow_db.lag_demux_ft = ft;
3452 	return 0;
3453 
3454 err_destroy_vport_lag:
3455 	mlx5_cmd_destroy_vport_lag(mdev);
3456 	return err;
3457 }
3458 
3459 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3460 {
3461 	struct mlx5_core_dev *mdev = dev->mdev;
3462 
3463 	if (dev->flow_db.lag_demux_ft) {
3464 		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
3465 		dev->flow_db.lag_demux_ft = NULL;
3466 
3467 		mlx5_cmd_destroy_vport_lag(mdev);
3468 	}
3469 }
3470 
3471 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
3472 {
3473 	int err;
3474 
3475 	dev->roce.nb.notifier_call = mlx5_netdev_event;
3476 	err = register_netdevice_notifier(&dev->roce.nb);
3477 	if (err) {
3478 		dev->roce.nb.notifier_call = NULL;
3479 		return err;
3480 	}
3481 
3482 	return 0;
3483 }
3484 
3485 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
3486 {
3487 	if (dev->roce.nb.notifier_call) {
3488 		unregister_netdevice_notifier(&dev->roce.nb);
3489 		dev->roce.nb.notifier_call = NULL;
3490 	}
3491 }
3492 
3493 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3494 {
3495 	int err;
3496 
3497 	err = mlx5_add_netdev_notifier(dev);
3498 	if (err)
3499 		return err;
3500 
3501 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
3502 		err = mlx5_nic_vport_enable_roce(dev->mdev);
3503 		if (err)
3504 			goto err_unregister_netdevice_notifier;
3505 	}
3506 
3507 	err = mlx5_eth_lag_init(dev);
3508 	if (err)
3509 		goto err_disable_roce;
3510 
3511 	return 0;
3512 
3513 err_disable_roce:
3514 	if (MLX5_CAP_GEN(dev->mdev, roce))
3515 		mlx5_nic_vport_disable_roce(dev->mdev);
3516 
3517 err_unregister_netdevice_notifier:
3518 	mlx5_remove_netdev_notifier(dev);
3519 	return err;
3520 }
3521 
3522 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3523 {
3524 	mlx5_eth_lag_cleanup(dev);
3525 	if (MLX5_CAP_GEN(dev->mdev, roce))
3526 		mlx5_nic_vport_disable_roce(dev->mdev);
3527 }
3528 
3529 struct mlx5_ib_counter {
3530 	const char *name;
3531 	size_t offset;
3532 };
3533 
3534 #define INIT_Q_COUNTER(_name)		\
3535 	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
3536 
3537 static const struct mlx5_ib_counter basic_q_cnts[] = {
3538 	INIT_Q_COUNTER(rx_write_requests),
3539 	INIT_Q_COUNTER(rx_read_requests),
3540 	INIT_Q_COUNTER(rx_atomic_requests),
3541 	INIT_Q_COUNTER(out_of_buffer),
3542 };
3543 
3544 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
3545 	INIT_Q_COUNTER(out_of_sequence),
3546 };
3547 
3548 static const struct mlx5_ib_counter retrans_q_cnts[] = {
3549 	INIT_Q_COUNTER(duplicate_request),
3550 	INIT_Q_COUNTER(rnr_nak_retry_err),
3551 	INIT_Q_COUNTER(packet_seq_err),
3552 	INIT_Q_COUNTER(implied_nak_seq_err),
3553 	INIT_Q_COUNTER(local_ack_timeout_err),
3554 };
3555 
3556 #define INIT_CONG_COUNTER(_name)		\
3557 	{ .name = #_name, .offset =	\
3558 		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
3559 
3560 static const struct mlx5_ib_counter cong_cnts[] = {
3561 	INIT_CONG_COUNTER(rp_cnp_ignored),
3562 	INIT_CONG_COUNTER(rp_cnp_handled),
3563 	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
3564 	INIT_CONG_COUNTER(np_cnp_sent),
3565 };
3566 
3567 static const struct mlx5_ib_counter extended_err_cnts[] = {
3568 	INIT_Q_COUNTER(resp_local_length_error),
3569 	INIT_Q_COUNTER(resp_cqe_error),
3570 	INIT_Q_COUNTER(req_cqe_error),
3571 	INIT_Q_COUNTER(req_remote_invalid_request),
3572 	INIT_Q_COUNTER(req_remote_access_errors),
3573 	INIT_Q_COUNTER(resp_remote_access_errors),
3574 	INIT_Q_COUNTER(resp_cqe_flush_error),
3575 	INIT_Q_COUNTER(req_cqe_flush_error),
3576 };
3577 
3578 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
3579 {
3580 	unsigned int i;
3581 
3582 	for (i = 0; i < dev->num_ports; i++) {
3583 		mlx5_core_dealloc_q_counter(dev->mdev,
3584 					    dev->port[i].cnts.set_id);
3585 		kfree(dev->port[i].cnts.names);
3586 		kfree(dev->port[i].cnts.offsets);
3587 	}
3588 }
3589 
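/*
 * Count the counters this device exposes (the basic Q counters plus any
 * capability-dependent sets) and allocate the parallel name and offset
 * arrays describing them. Congestion counters are included in the arrays
 * but tracked separately through num_cong_counters.
 */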
3590 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
3591 				    struct mlx5_ib_counters *cnts)
3592 {
3593 	u32 num_counters;
3594 
3595 	num_counters = ARRAY_SIZE(basic_q_cnts);
3596 
3597 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
3598 		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
3599 
3600 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
3601 		num_counters += ARRAY_SIZE(retrans_q_cnts);
3602 
3603 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
3604 		num_counters += ARRAY_SIZE(extended_err_cnts);
3605 
3606 	cnts->num_q_counters = num_counters;
3607 
3608 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
3609 		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
3610 		num_counters += ARRAY_SIZE(cong_cnts);
3611 	}
3612 
3613 	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
3614 	if (!cnts->names)
3615 		return -ENOMEM;
3616 
3617 	cnts->offsets = kcalloc(num_counters,
3618 				sizeof(*cnts->offsets), GFP_KERNEL);
3619 	if (!cnts->offsets)
3620 		goto err_names;
3621 
3622 	return 0;
3623 
3624 err_names:
3625 	kfree(cnts->names);
3626 	return -ENOMEM;
3627 }
3628 
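/*
 * Fill the name/offset arrays in the same capability-dependent order used
 * to size them in __mlx5_ib_alloc_counters(), so stats indexes line up
 * with the offsets read back from firmware.
 */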
3629 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
3630 				  const char **names,
3631 				  size_t *offsets)
3632 {
3633 	int i;
3634 	int j = 0;
3635 
3636 	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
3637 		names[j] = basic_q_cnts[i].name;
3638 		offsets[j] = basic_q_cnts[i].offset;
3639 	}
3640 
3641 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
3642 		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
3643 			names[j] = out_of_seq_q_cnts[i].name;
3644 			offsets[j] = out_of_seq_q_cnts[i].offset;
3645 		}
3646 	}
3647 
3648 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
3649 		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
3650 			names[j] = retrans_q_cnts[i].name;
3651 			offsets[j] = retrans_q_cnts[i].offset;
3652 		}
3653 	}
3654 
3655 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
3656 		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
3657 			names[j] = extended_err_cnts[i].name;
3658 			offsets[j] = extended_err_cnts[i].offset;
3659 		}
3660 	}
3661 
3662 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
3663 		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
3664 			names[j] = cong_cnts[i].name;
3665 			offsets[j] = cong_cnts[i].offset;
3666 		}
3667 	}
3668 }
3669 
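/*
 * Per-port counter setup: allocate a firmware Q counter set and the
 * counter description arrays for every port, releasing whatever was
 * already allocated if a later port fails.
 */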
3670 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
3671 {
3672 	int i;
3673 	int ret;
3674 
3675 	for (i = 0; i < dev->num_ports; i++) {
3676 		struct mlx5_ib_port *port = &dev->port[i];
3677 
3678 		ret = mlx5_core_alloc_q_counter(dev->mdev,
3679 						&port->cnts.set_id);
3680 		if (ret) {
3681 			mlx5_ib_warn(dev,
3682 				     "couldn't allocate queue counter for port %d, err %d\n",
3683 				     i + 1, ret);
3684 			goto dealloc_counters;
3685 		}
3686 
		ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
		if (ret) {
			/* don't leak the Q counter allocated just above */
			mlx5_core_dealloc_q_counter(dev->mdev,
						    port->cnts.set_id);
			goto dealloc_counters;
		}
3690 
3691 		mlx5_ib_fill_counters(dev, port->cnts.names,
3692 				      port->cnts.offsets);
3693 	}
3694 
3695 	return 0;
3696 
3697 dealloc_counters:
	while (--i >= 0) {
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].cnts.set_id);
		/* free the name/offset arrays of ports already set up */
		kfree(dev->port[i].cnts.names);
		kfree(dev->port[i].cnts.offsets);
	}
3701 
3702 	return ret;
3703 }
3704 
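/*
 * Allocation hook for the rdma_hw_stats core, sized for this port's Q
 * counters plus congestion counters. Only per-port stats are supported,
 * so a port_num of 0 is rejected.
 */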
3705 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
3706 						    u8 port_num)
3707 {
3708 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3709 	struct mlx5_ib_port *port;
3710 
3711 	/* We support only per port stats */
3712 	if (port_num == 0)
3713 		return NULL;
3714 	port = &dev->port[port_num - 1];
3715 	return rdma_alloc_hw_stats_struct(port->cnts.names,
3716 					  port->cnts.num_q_counters +
3717 					  port->cnts.num_cong_counters,
3718 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
3719 }
3720 
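/*
 * Read this port's Q counter set from firmware and copy each value into
 * the stats array; the values are big-endian 32-bit words located at the
 * offsets recorded in port->cnts.offsets.
 */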
3721 static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
3722 				    struct mlx5_ib_port *port,
3723 				    struct rdma_hw_stats *stats)
3724 {
3725 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
3726 	void *out;
3727 	__be32 val;
3728 	int ret, i;
3729 
3730 	out = kvzalloc(outlen, GFP_KERNEL);
3731 	if (!out)
3732 		return -ENOMEM;
3733 
3734 	ret = mlx5_core_query_q_counter(dev->mdev,
3735 					port->cnts.set_id, 0,
3736 					out, outlen);
3737 	if (ret)
3738 		goto free;
3739 
3740 	for (i = 0; i < port->cnts.num_q_counters; i++) {
3741 		val = *(__be32 *)(out + port->cnts.offsets[i]);
3742 		stats->value[i] = (u64)be32_to_cpu(val);
3743 	}
3744 
3745 free:
3746 	kvfree(out);
3747 	return ret;
3748 }
3749 
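/*
 * rdma_hw_stats read hook: the Q counters are always refreshed, and the
 * congestion counters are appended after them when cc_query_allowed is
 * set. Returns the number of counters filled in, or a negative errno.
 */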
3750 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
3751 				struct rdma_hw_stats *stats,
3752 				u8 port_num, int index)
3753 {
3754 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3755 	struct mlx5_ib_port *port = &dev->port[port_num - 1];
3756 	int ret, num_counters;
3757 
3758 	if (!stats)
3759 		return -EINVAL;
3760 
3761 	ret = mlx5_ib_query_q_counters(dev, port, stats);
3762 	if (ret)
3763 		return ret;
3764 	num_counters = port->cnts.num_q_counters;
3765 
3766 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
3767 		ret = mlx5_lag_query_cong_counters(dev->mdev,
3768 						   stats->value +
3769 						   port->cnts.num_q_counters,
3770 						   port->cnts.num_cong_counters,
3771 						   port->cnts.offsets +
3772 						   port->cnts.num_q_counters);
3773 		if (ret)
3774 			return ret;
3775 		num_counters += port->cnts.num_cong_counters;
3776 	}
3777 
3778 	return num_counters;
3779 }
3780 
3781 static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
3782 {
3783 	mlx5_rdma_netdev_free(netdev);
3784 }
3785 
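/*
 * Allocate an IPoIB offload net_device backed by this mlx5 device; only
 * RDMA_NETDEV_IPOIB is supported. On success the rdma_netdev's
 * free_rdma_netdev callback is set so the caller can release it later.
 */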
3786 static struct net_device*
3787 mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
3788 			  u8 port_num,
3789 			  enum rdma_netdev_t type,
3790 			  const char *name,
3791 			  unsigned char name_assign_type,
3792 			  void (*setup)(struct net_device *))
3793 {
3794 	struct net_device *netdev;
3795 	struct rdma_netdev *rn;
3796 
3797 	if (type != RDMA_NETDEV_IPOIB)
3798 		return ERR_PTR(-EOPNOTSUPP);
3799 
3800 	netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
3801 					name, setup);
3802 	if (likely(!IS_ERR_OR_NULL(netdev))) {
3803 		rn = netdev_priv(netdev);
3804 		rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
3805 	}
3806 	return netdev;
3807 }
3808 
3809 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
3810 {
3811 	if (!dev->delay_drop.dbg)
3812 		return;
3813 	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
3814 	kfree(dev->delay_drop.dbg);
3815 	dev->delay_drop.dbg = NULL;
3816 }
3817 
3818 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
3819 {
3820 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
3821 		return;
3822 
3823 	cancel_work_sync(&dev->delay_drop.delay_drop_work);
3824 	delay_drop_debugfs_cleanup(dev);
3825 }
3826 
3827 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3828 				       size_t count, loff_t *pos)
3829 {
3830 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3831 	char lbuf[20];
3832 	int len;
3833 
3834 	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3835 	return simple_read_from_buffer(buf, count, pos, lbuf, len);
3836 }
3837 
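/*
 * debugfs write handler: the timeout is given in microseconds, rounded up
 * to a multiple of 100 and clamped to MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
 * 1000.
 */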
3838 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3839 					size_t count, loff_t *pos)
3840 {
3841 	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3842 	u32 timeout;
3843 	u32 var;
3844 
3845 	if (kstrtouint_from_user(buf, count, 0, &var))
3846 		return -EFAULT;
3847 
3848 	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3849 			1000);
3850 	if (timeout != var)
3851 		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3852 			    timeout);
3853 
3854 	delay_drop->timeout = timeout;
3855 
3856 	return count;
3857 }
3858 
3859 static const struct file_operations fops_delay_drop_timeout = {
3860 	.owner	= THIS_MODULE,
3861 	.open	= simple_open,
3862 	.write	= delay_drop_timeout_write,
3863 	.read	= delay_drop_timeout_read,
3864 };
3865 
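/*
 * Expose delay-drop state under debugfs (a "delay_drop" directory below
 * the device's dbg_root): the timeout event and RQ counters plus the
 * configurable timeout. Skipped when the mlx5 debugfs root is not set up.
 */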
3866 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3867 {
3868 	struct mlx5_ib_dbg_delay_drop *dbg;
3869 
3870 	if (!mlx5_debugfs_root)
3871 		return 0;
3872 
3873 	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
3874 	if (!dbg)
3875 		return -ENOMEM;
3876 
3877 	dev->delay_drop.dbg = dbg;
3878 
3879 	dbg->dir_debugfs =
3880 		debugfs_create_dir("delay_drop",
3881 				   dev->mdev->priv.dbg_root);
3882 	if (!dbg->dir_debugfs)
3883 		goto out_debugfs;
3884 
3885 	dbg->events_cnt_debugfs =
3886 		debugfs_create_atomic_t("num_timeout_events", 0400,
3887 					dbg->dir_debugfs,
3888 					&dev->delay_drop.events_cnt);
3889 	if (!dbg->events_cnt_debugfs)
3890 		goto out_debugfs;
3891 
3892 	dbg->rqs_cnt_debugfs =
3893 		debugfs_create_atomic_t("num_rqs", 0400,
3894 					dbg->dir_debugfs,
3895 					&dev->delay_drop.rqs_cnt);
3896 	if (!dbg->rqs_cnt_debugfs)
3897 		goto out_debugfs;
3898 
3899 	dbg->timeout_debugfs =
3900 		debugfs_create_file("timeout", 0600,
3901 				    dbg->dir_debugfs,
3902 				    &dev->delay_drop,
3903 				    &fops_delay_drop_timeout);
3904 	if (!dbg->timeout_debugfs)
3905 		goto out_debugfs;
3906 
3907 	return 0;
3908 
3909 out_debugfs:
3910 	delay_drop_debugfs_cleanup(dev);
3911 	return -ENOMEM;
3912 }
3913 
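/*
 * Initialize delay-drop support for devices advertising
 * IB_RAW_PACKET_CAP_DELAY_DROP: default timeout, work item and counters.
 * A debugfs failure only triggers a warning; it does not fail device init.
 */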
3914 static void init_delay_drop(struct mlx5_ib_dev *dev)
3915 {
3916 	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
3917 		return;
3918 
3919 	mutex_init(&dev->delay_drop.lock);
3920 	dev->delay_drop.dev = dev;
3921 	dev->delay_drop.activate = false;
3922 	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
3923 	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
3924 	atomic_set(&dev->delay_drop.rqs_cnt, 0);
3925 	atomic_set(&dev->delay_drop.events_cnt, 0);
3926 
3927 	if (delay_drop_debugfs_init(dev))
3928 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
3929 }
3930 
3931 static const struct cpumask *
3932 mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
3933 {
3934 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3935 
3936 	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
3937 }
3938 
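/*
 * Probe path for a new IB device on top of an mlx5 core device: allocate
 * the ib_device, wire up the verbs callbacks according to the reported
 * capabilities, bring up RoCE for Ethernet ports, then create device
 * resources, counters, UAR/bfreg state, register with the RDMA core and
 * finally set up UMR resources, delay-drop and sysfs attributes.
 * Returns the new device, or NULL on any failure.
 */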
3939 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3940 {
3941 	struct mlx5_ib_dev *dev;
3942 	enum rdma_link_layer ll;
3943 	int port_type_cap;
3944 	const char *name;
3945 	int err;
3946 	int i;
3947 
3948 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3949 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
3950 
3951 	printk_once(KERN_INFO "%s", mlx5_version);
3952 
3953 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
3954 	if (!dev)
3955 		return NULL;
3956 
3957 	dev->mdev = mdev;
3958 
3959 	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
3960 			    GFP_KERNEL);
3961 	if (!dev->port)
3962 		goto err_dealloc;
3963 
3964 	rwlock_init(&dev->roce.netdev_lock);
3965 	err = get_port_caps(dev);
3966 	if (err)
3967 		goto err_free_port;
3968 
3969 	if (mlx5_use_mad_ifc(dev))
3970 		get_ext_port_caps(dev);
3971 
3972 	if (!mlx5_lag_is_active(mdev))
3973 		name = "mlx5_%d";
3974 	else
3975 		name = "mlx5_bond_%d";
3976 
3977 	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
3978 	dev->ib_dev.owner		= THIS_MODULE;
3979 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
3980 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
3981 	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
3982 	dev->ib_dev.phys_port_cnt     = dev->num_ports;
3983 	dev->ib_dev.num_comp_vectors    =
3984 		dev->mdev->priv.eq_table.num_comp_vectors;
3985 	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
3986 
3987 	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
3988 	dev->ib_dev.uverbs_cmd_mask	=
3989 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
3990 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
3991 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
3992 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
3993 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
3994 		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
3995 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
3996 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
3997 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
3998 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
3999 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
4000 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
4001 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
4002 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
4003 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
4004 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
4005 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
4006 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
4007 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
4008 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
4009 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
4010 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
4011 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
4012 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
4013 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
4014 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
4015 	dev->ib_dev.uverbs_ex_cmd_mask =
4016 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
4017 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
4018 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
4019 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)	|
4020 		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
4021 
4022 	dev->ib_dev.query_device	= mlx5_ib_query_device;
4023 	dev->ib_dev.query_port		= mlx5_ib_query_port;
4024 	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
4025 	if (ll == IB_LINK_LAYER_ETHERNET)
4026 		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
4027 	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
4028 	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
4029 	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
4030 	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
4031 	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
4032 	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
4033 	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
4034 	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
4035 	dev->ib_dev.mmap		= mlx5_ib_mmap;
4036 	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
4037 	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
4038 	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
4039 	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
4040 	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
4041 	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
4042 	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
4043 	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
4044 	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
4045 	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
4046 	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
4047 	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
4048 	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
4049 	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
4050 	dev->ib_dev.post_send		= mlx5_ib_post_send;
4051 	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
4052 	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
4053 	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
4054 	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
4055 	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
4056 	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
4057 	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
4058 	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
4059 	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
4060 	dev->ib_dev.rereg_user_mr	= mlx5_ib_rereg_user_mr;
4061 	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
4062 	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
4063 	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
4064 	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
4065 	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
4066 	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
4067 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
4068 	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
4069 	dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
4070 	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
4071 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
4072 		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
4073 
4074 	if (mlx5_core_is_pf(mdev)) {
4075 		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
4076 		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
4077 		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
4078 		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;
4079 	}
4080 
4081 	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
4082 
4083 	mlx5_ib_internal_fill_odp_caps(dev);
4084 
4085 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
4086 
4087 	if (MLX5_CAP_GEN(mdev, imaicl)) {
4088 		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
4089 		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
4090 		dev->ib_dev.uverbs_cmd_mask |=
4091 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
4092 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
4093 	}
4094 
4095 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
4096 		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
4097 		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;
4098 	}
4099 
4100 	if (MLX5_CAP_GEN(mdev, xrc)) {
4101 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
4102 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
4103 		dev->ib_dev.uverbs_cmd_mask |=
4104 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
4105 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
4106 	}
4107 
4108 	dev->ib_dev.create_flow	= mlx5_ib_create_flow;
4109 	dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
4110 	dev->ib_dev.uverbs_ex_cmd_mask |=
4111 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
4112 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
4113 
4114 	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
4115 	    IB_LINK_LAYER_ETHERNET) {
4116 		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
4117 		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
4118 		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
4119 		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
4120 		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
4121 		dev->ib_dev.uverbs_ex_cmd_mask |=
4122 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
4123 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
4124 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
4125 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
4126 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
4127 	}
4128 	err = init_node_data(dev);
4129 	if (err)
4130 		goto err_free_port;
4131 
4132 	mutex_init(&dev->flow_db.lock);
4133 	mutex_init(&dev->cap_mask_mutex);
4134 	INIT_LIST_HEAD(&dev->qp_list);
4135 	spin_lock_init(&dev->reset_flow_resource_lock);
4136 
4137 	if (ll == IB_LINK_LAYER_ETHERNET) {
4138 		err = mlx5_enable_eth(dev);
4139 		if (err)
4140 			goto err_free_port;
4141 		dev->roce.last_port_state = IB_PORT_DOWN;
4142 	}
4143 
4144 	err = create_dev_resources(&dev->devr);
4145 	if (err)
4146 		goto err_disable_eth;
4147 
4148 	err = mlx5_ib_odp_init_one(dev);
4149 	if (err)
4150 		goto err_rsrc;
4151 
4152 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
4153 		err = mlx5_ib_alloc_counters(dev);
4154 		if (err)
4155 			goto err_odp;
4156 	}
4157 
4158 	err = mlx5_ib_init_cong_debugfs(dev);
4159 	if (err)
4160 		goto err_cnt;
4161 
4162 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4163 	if (IS_ERR(dev->mdev->priv.uar))
4164 		goto err_cong;
4165 
4166 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4167 	if (err)
4168 		goto err_uar_page;
4169 
4170 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4171 	if (err)
4172 		goto err_bfreg;
4173 
4174 	err = ib_register_device(&dev->ib_dev, NULL);
4175 	if (err)
4176 		goto err_fp_bfreg;
4177 
4178 	err = create_umr_res(dev);
4179 	if (err)
4180 		goto err_dev;
4181 
4182 	init_delay_drop(dev);
4183 
4184 	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
4185 		err = device_create_file(&dev->ib_dev.dev,
4186 					 mlx5_class_attributes[i]);
4187 		if (err)
4188 			goto err_delay_drop;
4189 	}
4190 
4191 	if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4192 	    (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
4193 	     MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
4194 		mutex_init(&dev->lb_mutex);
4195 
4196 	dev->ib_active = true;
4197 
4198 	return dev;
4199 
4200 err_delay_drop:
4201 	cancel_delay_drop(dev);
4202 	destroy_umrc_res(dev);
4203 
4204 err_dev:
4205 	ib_unregister_device(&dev->ib_dev);
4206 
4207 err_fp_bfreg:
4208 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4209 
4210 err_bfreg:
4211 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4212 
4213 err_uar_page:
4214 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4215 
4216 err_cong:
4217 	mlx5_ib_cleanup_cong_debugfs(dev);
4218 err_cnt:
4219 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4220 		mlx5_ib_dealloc_counters(dev);
4221 
4222 err_odp:
4223 	mlx5_ib_odp_remove_one(dev);
4224 
4225 err_rsrc:
4226 	destroy_dev_resources(&dev->devr);
4227 
4228 err_disable_eth:
4229 	if (ll == IB_LINK_LAYER_ETHERNET) {
4230 		mlx5_disable_eth(dev);
4231 		mlx5_remove_netdev_notifier(dev);
4232 	}
4233 
4234 err_free_port:
4235 	kfree(dev->port);
4236 
4237 err_dealloc:
4238 	ib_dealloc_device(&dev->ib_dev);
4239 
4240 	return NULL;
4241 }
4242 
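/*
 * Teardown mirror of mlx5_ib_add(): resources are released in roughly
 * the reverse order of their creation.
 */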
4243 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
4244 {
4245 	struct mlx5_ib_dev *dev = context;
4246 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
4247 
4248 	cancel_delay_drop(dev);
4249 	mlx5_remove_netdev_notifier(dev);
4250 	ib_unregister_device(&dev->ib_dev);
4251 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4252 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4253 	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
4254 	mlx5_ib_cleanup_cong_debugfs(dev);
4255 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4256 		mlx5_ib_dealloc_counters(dev);
4257 	destroy_umrc_res(dev);
4258 	mlx5_ib_odp_remove_one(dev);
4259 	destroy_dev_resources(&dev->devr);
4260 	if (ll == IB_LINK_LAYER_ETHERNET)
4261 		mlx5_disable_eth(dev);
4262 	kfree(dev->port);
4263 	ib_dealloc_device(&dev->ib_dev);
4264 }
4265 
4266 static struct mlx5_interface mlx5_ib_interface = {
4267 	.add            = mlx5_ib_add,
4268 	.remove         = mlx5_ib_remove,
4269 	.event          = mlx5_ib_event,
4270 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4271 	.pfault		= mlx5_ib_pfault,
4272 #endif
4273 	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
4274 };
4275 
4276 static int __init mlx5_ib_init(void)
4277 {
4278 	int err;
4279 
4280 	mlx5_ib_odp_init();
4281 
4282 	err = mlx5_register_interface(&mlx5_ib_interface);
4283 
4284 	return err;
4285 }
4286 
4287 static void __exit mlx5_ib_cleanup(void)
4288 {
4289 	mlx5_unregister_interface(&mlx5_ib_interface);
4290 }
4291 
4292 module_init(mlx5_ib_init);
4293 module_exit(mlx5_ib_cleanup);
4294