xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
/*-
 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/fs_tcp.h>

/*
 * The flow tables and their rules define packet processing on receive.
 * Currently the following structure is set up to handle different
 * offloads, such as TLS RX offload, VLAN decapsulation, packet
 * classification, RSS hashing and VxLAN checksum offloading:
 *
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv4 |       |TCP/IPv4 |      |TCP/IPv4 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv6 |       |TCP/IPv6 |      |TCP/IPv6 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
 *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
 *   |VID/noVID|/      |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *                                |
 *                                |
 *                                v
 *                      +=================+
 *                      |Main             |
 *                      |Outer Proto Match|=====> TIR n
 *                      |                 |
 *                      +=================+
 *
 * The path through the flow rules directs each packet into an appropriate
 * TIR, according to:
 * - VLAN encapsulation
 * - Outer protocol
 * - Presence of an inner protocol
 */

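/*
 * Convenience wrapper for filling out "create_flow_group_in" command
 * structures below. For example,
 *
 *	MLX5_SET_CFG(in, start_flow_index, ix);
 *
 * is equivalent to
 *
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, ix);
 */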
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);

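/*
 * Hash an ethernet address by its least significant byte. This keeps
 * the hash trivially cheap and assumes the address hash tables have
 * (at least) 256 buckets (MLX5E_ETH_ADDR_HASH_SIZE), so the return
 * value is always a valid bucket index.
 */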
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
}

static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

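/*
 * Compute the set of traffic types, as a bitmask of MLX5E_TT_* values,
 * for which flow rules should be installed for the given address and
 * match type. For example, an IPv4 multicast address only needs rules
 * for IPv4 and IPv4/UDP traffic, while promiscuous mode needs rules
 * for every traffic type.
 */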
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

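/*
 * Install one flow rule per traffic type in "tt_vec", steering matching
 * packets from the main flow table into the corresponding TIR. The
 * "mc" (match criteria) and "mv" (match value) buffers are shared by
 * all rules created here, so fields programmed for earlier rules
 * (DMAC, ethertype, ip_protocol) intentionally remain in effect for
 * the later, more specific ones.
 */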
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
}

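/*
 * Counterpart of mlx5e_add_eth_addr_rule_sub() for VXLAN traffic:
 * classify decapsulated packets by their inner headers and steer them
 * into the inner-TIR set. The final rule, created with an empty match,
 * acts as a catch-all which sends all remaining traffic to the
 * MLX5E_TT_ANY TIR.
 */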
static int
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
	u32 *tirn = priv->tirn_inner_vxlan;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	mc_enable = MLX5_MATCH_INNER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
	dest.tir_num = tirn[MLX5E_TT_IPV4];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
	dest.tir_num = tirn[MLX5E_TT_IPV6];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	mc_enable = 0;
	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
	dest.tir_num = tirn[MLX5E_TT_ANY];
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_main_vxlan_rules(priv);

	return (err);
}

static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_main_vxlan_rules_out;
	}
	err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);

add_main_vxlan_rules_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

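/*
 * Push the set of active VLAN IDs into the NIC vport context. The
 * firmware uses this list for vport-level VLAN filtering, which is
 * presumably relevant when the function does not fully own the
 * physical port (e.g. SR-IOV). The list is clamped to the device
 * limit of (1 << log_max_vlan_list) entries.
 */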
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	if_t ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

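/*
 * VLAN rules do not steer to a TIR directly. Instead, packets that
 * match are forwarded to the VXLAN flow table, which either matches a
 * known VXLAN UDP port or falls through to the main flow table via
 * its catch-all rule (see the diagram at the top of this file).
 */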
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.vxlan.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (if_getflags(priv->ifp) & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (if_getflags(priv->ifp) & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

static int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

static void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

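/*
 * Walk every node in an ethernet address hash table. The _SAFE list
 * iterator is used so that the loop body may unlink and free "hn"
 * while iterating.
 */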
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

struct mlx5e_copy_addr_ctx {
	struct mlx5e_eth_addr_hash_head *free;
	struct mlx5e_eth_addr_hash_head *fill;
	bool success;
};

static u_int
mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx5e_copy_addr_ctx *ctx = arg;
	struct mlx5e_eth_addr_hash_node *hn;

	hn = mlx5e_move_hn(ctx->free, ctx->fill);
	if (hn == NULL) {
		ctx->success = false;
		return (0);
	}
	ether_addr_copy(hn->ai.addr, LLADDR(sdl));

	return (1);
}

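/*
 * Synchronize the driver's unicast and multicast address hash tables
 * with the addresses currently configured on the network interface.
 * Placeholder nodes for the worst case are preallocated with M_WAITOK
 * before the interface address lists are walked; if the lists grew in
 * the meantime and the placeholders run out, everything is freed and
 * the whole pass is retried.
 */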
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_copy_addr_ctx ctx;
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	if_t ifp = priv->ifp;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

retry:
	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	MPASS(hn != NULL);

	ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));

	ctx.free = &head_free;
	ctx.fill = &head_uc;
	ctx.success = true;
	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	ctx.fill = &head_mc;
	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (ctx.success == false)
		goto retry;
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	if_t ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (rx_mode_enable)
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

static void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	if_t ndev = priv->ifp;
	int ndev_flags = if_getflags(ndev);

	bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    if_getbroadcastaddr(priv->ifp));

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv, rx_mode_enable);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_set_rx_mode_core(priv, true);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

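/*
 * Flow group geometry for the main table. All rules within one group
 * share the same match criteria mask, so each group must be sized to
 * hold every rule created with that mask; the table size is simply
 * the sum of the group sizes. Group 4, for instance, covers the
 * per-DMAC ethertype/ip_protocol rules installed by
 * mlx5e_add_eth_addr_rule_sub() and is therefore by far the largest.
 */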
#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

#define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
static int
mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
{
	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
	    &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
	    inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
	    mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

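/*
 * Match outer UDP packets destined to a known VXLAN port and forward
 * them to the "main_vxlan" table for inner-header classification.
 */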
1675 static int
1676 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1677     struct mlx5e_vxlan_db_el *el)
1678 {
1679 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1680 	struct mlx5_flow_destination dest = {};
1681 	u8 mc_enable;
1682 	struct mlx5_flow_rule **rule_p;
1683 	int err = 0;
1684 
1685 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1686 	dest.ft = priv->fts.main_vxlan.t;
1687 
1688 	mc_enable = MLX5_MATCH_OUTER_HEADERS;
1689 	rule_p = &el->vxlan_ft_rule;
1690 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1691 	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1692 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1693 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1694 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1695 	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1696 
1697 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1698 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1699 
1700 	if (IS_ERR(*rule_p)) {
1701 		err = PTR_ERR(*rule_p);
1702 		*rule_p = NULL;
1703 		mlx5_en_err(priv->ifp, "add rule failed\n");
1704 	}
1705 
1706 	return (err);
1707 }
1708 
1709 static struct mlx5e_vxlan_db_el *
1710 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1711 {
1712 	struct mlx5e_vxlan_db_el *el;
1713 
1714 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1715 		if (el->proto == proto && el->port == port)
1716 			return (el);
1717 	}
1718 	return (NULL);
1719 }
1720 
1721 static struct mlx5e_vxlan_db_el *
1722 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1723 {
1724 	struct mlx5e_vxlan_db_el *el;
1725 
1726 	el = mlx5_vzalloc(sizeof(*el));
1727 	el->refcount = 1;
1728 	el->proto = proto;
1729 	el->port = port;
1730 	el->vxlan_ft_rule = NULL;
1731 	return (el);
1732 }
1733 
1734 static int
1735 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1736 {
1737 	switch (family) {
1738 	case AF_INET:
1739 		*proto = ETHERTYPE_IP;
1740 		return (0);
1741 	case AF_INET6:
1742 		*proto = ETHERTYPE_IPV6;
1743 		return (0);
1744 	default:
1745 		return (-EINVAL);
1746 	}
1747 }
1748 
1749 static int
1750 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1751     struct mlx5e_vxlan_db_el *el)
1752 {
1753 	u32 *match_criteria;
1754 	u32 *match_value;
1755 	int err;
1756 
1757 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1758 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1759 	if (match_value == NULL || match_criteria == NULL) {
1760 		mlx5_en_err(priv->ifp, "alloc failed\n");
1761 		err = -ENOMEM;
1762 		goto add_vxlan_rule_out;
1763 	}
1764 
1765 	err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
1766 
1767 add_vxlan_rule_out:
1768 	kvfree(match_criteria);
1769 	kvfree(match_value);
1770 
1771 	return (err);
1772 }
1773 
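/*
 * A minimal sketch of the expected usage, assuming a single IPv4
 * VXLAN endpoint on UDP port 4789 (the IANA default); both calls
 * must run under PRIV_LOCK():
 *
 *	error = mlx5e_add_vxlan_rule(priv, AF_INET, 4789);
 *	...
 *	error = mlx5e_del_vxlan_rule(priv, AF_INET, 4789);
 *
 * A second add for the same (family, port) pair only takes another
 * reference; the hardware rule is installed once.
 */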
1774 static int
1775 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1776 {
1777 	struct mlx5e_vxlan_db_el *el;
1778 	u_int proto;
1779 	int err;
1780 
1781 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1782 	if (err != 0)
1783 		return (err);
1784 
	el = mlx5e_vxlan_find_db_el(priv, proto, port);
	if (el != NULL) {
		/* Known endpoint; just take another reference. */
		el->refcount++;
		if (el->installed)
			return (0);
	} else {
		el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
		if (el == NULL)
			return (-ENOMEM);
		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
	}

	/*
	 * If the rule cannot be installed now, either because the
	 * checksum offload capability is disabled or the firmware
	 * refused the rule, keep the database entry uninstalled:
	 * mlx5e_add_all_vxlan_rules() may retry it later and
	 * mlx5e_del_vxlan_rule() will eventually free it.
	 */
	if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
		err = mlx5e_add_vxlan_rule_from_db(priv, el);
		if (err == 0)
			el->installed = true;
	}
1802 
1803 	return (err);
1804 }
1805 
1806 static int
1807 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
1808 {
1809 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1810 	struct mlx5_flow_destination dest = {};
1811 	u8 mc_enable = 0;
1812 	struct mlx5_flow_rule **rule_p;
1813 	int err = 0;
1814 
1815 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1816 	dest.ft = priv->fts.main.t;
1817 
1818 	rule_p = &priv->fts.vxlan_catchall_ft_rule;
1819 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1820 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1821 
1822 	if (IS_ERR(*rule_p)) {
1823 		err = PTR_ERR(*rule_p);
1824 		*rule_p = NULL;
1825 		mlx5_en_err(priv->ifp, "add rule failed\n");
1826 	}
1827 
1828 	return (err);
1829 }
1832 static int
1833 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1834 {
1835 	u32 *match_criteria;
1836 	u32 *match_value;
1837 	int err;
1838 
1839 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1840 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1841 	if (match_value == NULL || match_criteria == NULL) {
1842 		mlx5_en_err(priv->ifp, "alloc failed\n");
1843 		err = -ENOMEM;
1844 		goto add_vxlan_rule_out;
1845 	}
1846 
1847 	err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
1848 	    match_value);
1849 
1850 add_vxlan_rule_out:
1851 	kvfree(match_criteria);
1852 	kvfree(match_value);
1853 
1854 	return (err);
1855 }
1856 
1857 int
1858 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1859 {
1860 	struct mlx5e_vxlan_db_el *el;
1861 	int err;
1862 
1863 	err = 0;
1864 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1865 		if (el->installed)
1866 			continue;
1867 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1868 		if (err != 0)
1869 			break;
1870 		el->installed = true;
1871 	}
1872 
1873 	return (err);
1874 }
1875 
1876 static int
1877 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1878 {
1879 	struct mlx5e_vxlan_db_el *el;
1880 	u_int proto;
1881 	int err;
1882 
1883 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1884 	if (err != 0)
1885 		return (err);
1886 
1887 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1888 	if (el == NULL)
1889 		return (0);
1890 	if (el->refcount > 1) {
1891 		el->refcount--;
1892 		return (0);
1893 	}
1894 
1895 	if (el->installed)
1896 		mlx5_del_flow_rule(&el->vxlan_ft_rule);
1897 	TAILQ_REMOVE(&priv->vxlan.head, el, link);
1898 	kvfree(el);
1899 	return (0);
1900 }
1901 
1902 void
1903 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1904 {
1905 	struct mlx5e_vxlan_db_el *el;
1906 
1907 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1908 		if (!el->installed)
1909 			continue;
1910 		mlx5_del_flow_rule(&el->vxlan_ft_rule);
1911 		el->installed = false;
1912 	}
1913 }
1914 
1915 static void
1916 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1917 {
1918 	mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
1919 }
1920 
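/*
 * The two handlers below are shaped as if_vxlan(4) notification
 * callbacks.  A sketch of the registration, assuming the driver hooks
 * them up through EVENTHANDLER(9) as is done for other ifnet events:
 *
 *	EVENTHANDLER_REGISTER(vxlan_start, mlx5e_vxlan_start, priv,
 *	    EVENTHANDLER_PRI_ANY);
 *	EVENTHANDLER_REGISTER(vxlan_stop, mlx5e_vxlan_stop, priv,
 *	    EVENTHANDLER_PRI_ANY);
 */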
1921 void
1922 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1923     u_int port)
1924 {
1925 	struct mlx5e_priv *priv = arg;
1926 	int err;
1927 
1928 	PRIV_LOCK(priv);
1929 	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1930 	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1931 		mlx5e_add_vxlan_rule(priv, family, port);
1932 	PRIV_UNLOCK(priv);
1933 }
1934 
1935 void
1936 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1937     u_int port)
1938 {
1939 	struct mlx5e_priv *priv = arg;
1940 
1941 	PRIV_LOCK(priv);
1942 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1943 		mlx5e_del_vxlan_rule(priv, family, port);
1944 	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
1945 	PRIV_UNLOCK(priv);
1946 }
1947 
1948 #define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
1949 #define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
1950 #define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
1951 #define	MLX5E_VXLAN_TABLE_SIZE	\
1952     (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
1953 
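/*
 * Table layout: group 0 (BIT(3) == 8 entries) holds the per-endpoint
 * rules installed by mlx5e_add_vxlan_rule_sub(), and group 1
 * (BIT(0) == 1 entry) holds only the catch-all rule, giving a 9-entry
 * table split into MLX5E_NUM_VXLAN_GROUPS == 2 groups.
 */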
1954 static int
1955 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1956 				      int inlen)
1957 {
1958 	int err;
1959 	int ix = 0;
1960 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1961 
1962 	memset(in, 0, inlen);
1963 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1964 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1965 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1966 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1967 	MLX5_SET_CFG(in, start_flow_index, ix);
1968 	ix += MLX5E_VXLAN_GROUP0_SIZE;
1969 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1970 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1971 	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
1973 	ft->num_groups++;
1974 
1975 	memset(in, 0, inlen);
1976 	MLX5_SET_CFG(in, start_flow_index, ix);
1977 	ix += MLX5E_VXLAN_GROUP1_SIZE;
1978 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1979 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1980 	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
1982 	ft->num_groups++;
1983 
1984 	return (0);
1985 
err_destroy_groups:
1987 	err = PTR_ERR(ft->g[ft->num_groups]);
1988 	ft->g[ft->num_groups] = NULL;
1989 	mlx5e_destroy_groups(ft);
1990 
1991 	return (err);
1992 }
1993 
1994 static int
1995 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
1996 {
1997 	u32 *in;
1998 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1999 	int err;
2000 
2001 	in = mlx5_vzalloc(inlen);
2002 	if (!in)
2003 		return (-ENOMEM);
2004 
2005 	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
2006 
2007 	kvfree(in);
2008 	return (err);
2009 }
2010 
2011 static int
2012 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2013 {
2014 	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2015 	int err;
2016 
2017 	ft->num_groups = 0;
2018 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
2019 				       MLX5E_VXLAN_TABLE_SIZE);
2020 
2021 	if (IS_ERR(ft->t)) {
2022 		err = PTR_ERR(ft->t);
2023 		ft->t = NULL;
2024 		return (err);
2025 	}
2026 	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2027 	if (!ft->g) {
2028 		err = -ENOMEM;
2029 		goto err_destroy_vxlan_flow_table;
2030 	}
2031 
2032 	err = mlx5e_create_vxlan_groups(ft);
2033 	if (err)
2034 		goto err_free_g;
2035 
2036 	TAILQ_INIT(&priv->vxlan.head);
2037 	return (0);
2038 
2039 err_free_g:
2040 	kfree(ft->g);
2041 
2042 err_destroy_vxlan_flow_table:
2043 	mlx5_destroy_flow_table(ft->t);
2044 	ft->t = NULL;
2045 
2046 	return (err);
2047 }
2048 
2049 #define MLX5E_NUM_INNER_RSS_GROUPS	3
2050 #define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
2051 #define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
2052 #define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
2053 #define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
2054 					 MLX5E_INNER_RSS_GROUP1_SIZE +\
2055 					 MLX5E_INNER_RSS_GROUP2_SIZE +\
2056 					 0)
2057 
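/*
 * Table layout: 8 + 2 + 1 == 11 entries in three groups of decreasing
 * match specificity: inner ethertype plus IP protocol, inner ethertype
 * only, and a final catch-all with no match criteria.
 */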
2058 static int
2059 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2060 					   int inlen)
2061 {
2062 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2063 	int err;
2064 	int ix = 0;
2065 
2066 	memset(in, 0, inlen);
2067 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2068 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2069 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2070 	MLX5_SET_CFG(in, start_flow_index, ix);
2071 	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2072 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2073 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2074 	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
2076 	ft->num_groups++;
2077 
2078 	memset(in, 0, inlen);
2079 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2080 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2081 	MLX5_SET_CFG(in, start_flow_index, ix);
2082 	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2083 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2084 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2085 	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
2087 	ft->num_groups++;
2088 
2089 	memset(in, 0, inlen);
2090 	MLX5_SET_CFG(in, start_flow_index, ix);
2091 	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2092 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2093 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2094 	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
2096 	ft->num_groups++;
2097 
2098 	return (0);
2099 
err_destroy_groups:
2101 	err = PTR_ERR(ft->g[ft->num_groups]);
2102 	ft->g[ft->num_groups] = NULL;
2103 	mlx5e_destroy_groups(ft);
2104 
2105 	return (err);
2106 }
2107 
2108 static int
2109 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2110 {
2111 	u32 *in;
2112 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2113 	int err;
2114 
2115 	in = mlx5_vzalloc(inlen);
2116 	if (!in)
2117 		return (-ENOMEM);
2118 
2119 	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
2120 
2121 	kvfree(in);
2122 	return (err);
2123 }
2124 
2125 static int
2126 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2127 {
2128 	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2129 	int err;
2130 
2131 	ft->num_groups = 0;
2132 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
2133 				       MLX5E_INNER_RSS_TABLE_SIZE);
2134 
2135 	if (IS_ERR(ft->t)) {
2136 		err = PTR_ERR(ft->t);
2137 		ft->t = NULL;
2138 		return (err);
2139 	}
2140 	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2141 			GFP_KERNEL);
2142 	if (!ft->g) {
2143 		err = -ENOMEM;
2144 		goto err_destroy_inner_rss_flow_table;
2145 	}
2146 
2147 	err = mlx5e_create_inner_rss_groups(ft);
2148 	if (err)
2149 		goto err_free_g;
2150 
2151 	return (0);
2152 
2153 err_free_g:
2154 	kfree(ft->g);
2155 
2156 err_destroy_inner_rss_flow_table:
2157 	mlx5_destroy_flow_table(ft->t);
2158 	ft->t = NULL;
2159 
2160 	return (err);
2161 }
2162 
static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2164 {
2165 	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
2166 }
2167 
2168 static void
2169 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2170 {
2171 	mlx5e_destroy_flow_table(&priv->fts.vxlan);
2172 }
2173 
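/*
 * Tables are created in dependency order: a flow table must exist
 * before any rule can forward to it.  The main and main_vxlan tables
 * are therefore created before the VXLAN catch-all rule that targets
 * fts.main, and mlx5e_close_flow_tables() tears everything down in
 * exactly the reverse order.
 */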
2174 int
2175 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2176 {
2177 	int err;
2178 
2179 	/* setup namespace pointer */
2180 	priv->fts.ns = mlx5_get_flow_namespace(
2181 	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2182 
2183 	err = mlx5e_create_vlan_flow_table(priv);
2184 	if (err)
2185 		return (err);
2186 
2187 	err = mlx5e_create_vxlan_flow_table(priv);
2188 	if (err)
2189 		goto err_destroy_vlan_flow_table;
2190 
2191 	err = mlx5e_create_main_flow_table(priv, true);
2192 	if (err)
2193 		goto err_destroy_vxlan_flow_table;
2194 
2195 	err = mlx5e_create_inner_rss_flow_table(priv);
2196 	if (err)
2197 		goto err_destroy_main_flow_table_true;
2198 
2199 	err = mlx5e_create_main_flow_table(priv, false);
2200 	if (err)
2201 		goto err_destroy_inner_rss_flow_table;
2202 
2203 	err = mlx5e_add_vxlan_catchall_rule(priv);
2204 	if (err)
2205 		goto err_destroy_main_flow_table_false;
2206 
2207 	err = mlx5e_accel_fs_tcp_create(priv);
2208 	if (err)
2209 		goto err_del_vxlan_catchall_rule;
2210 
2211 	return (0);
2212 
2213 err_del_vxlan_catchall_rule:
2214 	mlx5e_del_vxlan_catchall_rule(priv);
2215 err_destroy_main_flow_table_false:
2216 	mlx5e_destroy_main_flow_table(priv);
2217 err_destroy_inner_rss_flow_table:
2218 	mlx5e_destroy_inner_rss_flow_table(priv);
2219 err_destroy_main_flow_table_true:
2220 	mlx5e_destroy_main_vxlan_flow_table(priv);
2221 err_destroy_vxlan_flow_table:
2222 	mlx5e_destroy_vxlan_flow_table(priv);
2223 err_destroy_vlan_flow_table:
2224 	mlx5e_destroy_vlan_flow_table(priv);
2225 
2226 	return (err);
2227 }
2228 
2229 void
2230 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2231 {
2232 	mlx5e_accel_fs_tcp_destroy(priv);
2233 	mlx5e_del_vxlan_catchall_rule(priv);
2234 	mlx5e_destroy_main_flow_table(priv);
2235 	mlx5e_destroy_inner_rss_flow_table(priv);
2236 	mlx5e_destroy_main_vxlan_flow_table(priv);
2237 	mlx5e_destroy_vxlan_flow_table(priv);
2238 	mlx5e_destroy_vlan_flow_table(priv);
2239 }
2240 
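/*
 * MLX5E_STATE_FLOW_RULES_READY gates the VXLAN callbacks above: while
 * it is set, mlx5e_vxlan_start()/mlx5e_vxlan_stop() install and remove
 * hardware rules directly; while it is clear, database entries are
 * left uninstalled and mlx5e_add_all_vxlan_rules() replays them here.
 */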
2241 int
2242 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2243 {
2244 	int err;
2245 
2246 	err = mlx5e_add_all_vlan_rules(priv);
2247 	if (err)
2248 		return (err);
2249 
2250 	err = mlx5e_add_main_vxlan_rules(priv);
2251 	if (err)
2252 		goto err_del_all_vlan_rules;
2253 
2254 	err = mlx5e_add_all_vxlan_rules(priv);
2255 	if (err)
2256 		goto err_del_main_vxlan_rules;
2257 
2258 	mlx5e_set_rx_mode_core(priv, true);
2259 
2260 	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2261 
2262 	return (0);
2263 
2264 err_del_main_vxlan_rules:
2265 	mlx5e_del_main_vxlan_rules(priv);
2266 
2267 err_del_all_vlan_rules:
2268 	mlx5e_del_all_vlan_rules(priv);
2269 
2270 	return (err);
2271 }
2272 
2273 void
2274 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2275 {
2276 	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2277 
2278 	mlx5e_set_rx_mode_core(priv, false);
2279 	mlx5e_del_all_vxlan_rules(priv);
2280 	mlx5e_del_main_vxlan_rules(priv);
2281 	mlx5e_del_all_vlan_rules(priv);
2282 }
2283