/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>

/*
 * The flow tables and their rules define packet processing on receive.
 * Currently, the following structure is set up to handle different
 * offloads, such as VLAN decapsulation, packet classification, RSS
 * hashing and VXLAN checksum offloading:
 *
 *   +=========+       +=========+      +=================+
 *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
 *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
 *   |VID/noVID|/      |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *                                |
 *                                |
 *                                v
 *                      +=================+
 *                      |Main             |
 *                      |Outer Proto Match|=====> TIR n
 *                      |                 |
 *                      +=================+
 *
 * The path through the flow rules directs each packet into the
 * appropriate TIR, according to:
 * - the VLAN encapsulation
 * - the outer protocol
 * - the presence of an inner protocol
 */
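
/*
 * Illustrative examples of the above: a VXLAN-encapsulated packet
 * carrying an inner IPv4/TCP flow first matches in the VLAN table, is
 * forwarded to the VxLAN table where it matches on its UDP destination
 * port, and is then steered by the "VxLAN Main" table's inner IPv4/TCP
 * rule into the corresponding inner TIR. A non-tunneled IPv4/TCP packet
 * instead falls through the VxLAN catch-all rule into the "Main" table
 * and is steered there by the outer IPv4/TCP rule. When VXLAN receive
 * checksum offloading is disabled, the VLAN table forwards directly to
 * "Main".
 */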

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

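/*
 * The L2 address hash is keyed on the least significant byte of the
 * MAC address, spreading entries across MLX5E_ETH_ADDR_HASH_SIZE
 * buckets.
 */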
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

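/*
 * Return the set of traffic types, as a bit vector of MLX5E_TT_*
 * values, for which flow rules must be installed for the given kind of
 * steering entry.
 */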
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

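/*
 * Install one flow rule per traffic type for the given address entry.
 * The match criteria/value buffers passed in are reused from rule to
 * rule: the match is progressively narrowed (first the ethertype, then
 * the IP protocol) while walking from the generic MLX5E_TT_ANY rule
 * towards the most specific TCP/UDP/IPsec rules.
 */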
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_rule **ra = priv->fts.main_vxlan_rule, **r;

	r = &ra[MLX5E_TT_IPV6_IPSEC_ESP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_IPSEC_ESP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_IPSEC_AH];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_IPSEC_AH];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_TCP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_TCP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6_UDP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4_UDP];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV6];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_IPV4];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}

	r = &ra[MLX5E_TT_ANY];
	if (*r != NULL) {
		mlx5_del_flow_rule(*r);
		*r = NULL;
	}
}

static int
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
	u32 *tirn = priv->tirn_inner_vxlan;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	mc_enable = MLX5_MATCH_INNER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
	dest.tir_num = tirn[MLX5E_TT_IPV4];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
	dest.tir_num = tirn[MLX5E_TT_IPV6];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	mc_enable = 0;
	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
	dest.tir_num = tirn[MLX5E_TT_ANY];
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_main_vxlan_rules(priv);

	return (err);
}

static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_main_vxlan_rules_out;
	}
	err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);

add_main_vxlan_rules_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

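/*
 * Mirror the set of active VLAN IDs into the NIC vport context, so the
 * device can filter VLANs on behalf of this vport. The list is
 * truncated if it exceeds the maximum the firmware advertises via
 * log_max_vlan_list.
 */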
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
		    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
		    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
		    err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) ?
	    priv->fts.vxlan.t : priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
	    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
		    i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

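/*
 * Iterate over every node in an address hash table; the SAFE list
 * iterator allows the current node to be removed while iterating.
 */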
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh,
    struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

struct mlx5e_copy_addr_ctx {
	struct mlx5e_eth_addr_hash_head *free;
	struct mlx5e_eth_addr_hash_head *fill;
	bool success;
};

static u_int
mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx5e_copy_addr_ctx *ctx = arg;
	struct mlx5e_eth_addr_hash_node *hn;

	hn = mlx5e_move_hn(ctx->free, ctx->fill);
	if (hn == NULL) {
		ctx->success = false;
		return (0);
	}
	ether_addr_copy(hn->ai.addr, LLADDR(sdl));

	return (1);
}

static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_copy_addr_ctx ctx;
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	struct ifnet *ifp = priv->ifp;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

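	/*
	 * Snapshot the interface's L2 unicast and multicast addresses.
	 * Placeholder nodes are preallocated from the current address
	 * counts; if the lists grow while being copied (the copy callback
	 * runs out of placeholders), everything is released and the whole
	 * procedure restarts.
	 */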
retry:
	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	MPASS(hn != NULL);

	ether_addr_copy(hn->ai.addr,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	ctx.free = &head_free;
	ctx.fill = &head_uc;
	ctx.success = true;
	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	ctx.fill = &head_mc;
	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (ctx.success == false)
		goto retry;
}

static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
		    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
		    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array,
	    size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
		    "Failed to modify vport %s list err(%d)\n",
		    is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

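/*
 * The main flow table is partitioned into flow groups, each reserving
 * a contiguous range of table entries for rules that share the same
 * match criteria; the table size is the sum of all group sizes. The
 * tunnel group is placed first so that VXLAN traffic is matched before
 * the generic outer-header rules.
 */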
#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in, int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

#define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
static int
mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
{
	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
	    &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
	    inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
	    mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

static void
mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in, int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

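/*
 * Match a VXLAN tunnel by outer ethertype, IP protocol (UDP) and UDP
 * destination port, and forward matching packets to the inner-header
 * ("main_vxlan") flow table for further classification.
 */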
1760 static int
1761 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1762     struct mlx5e_vxlan_db_el *el)
1763 {
1764 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1765 	struct mlx5_flow_destination dest = {};
1766 	u8 mc_enable;
1767 	struct mlx5_flow_rule **rule_p;
1768 	int err = 0;
1769 
1770 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1771 	dest.ft = priv->fts.main_vxlan.t;
1772 
1773 	mc_enable = MLX5_MATCH_OUTER_HEADERS;
1774 	rule_p = &el->vxlan_ft_rule;
1775 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1776 	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1777 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1778 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1779 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1780 	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1781 
1782 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1783 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1784 
1785 	if (IS_ERR(*rule_p)) {
1786 		err = PTR_ERR(*rule_p);
1787 		*rule_p = NULL;
1788 		mlx5_en_err(priv->ifp, "add rule failed\n");
1789 	}
1790 
1791 	return (err);
1792 }
1793 
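/*
 * Find the VXLAN database entry for the given outer ethertype and UDP
 * port, if any.
 */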
1794 static struct mlx5e_vxlan_db_el *
1795 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1796 {
1797 	struct mlx5e_vxlan_db_el *el;
1798 
1799 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1800 		if (el->proto == proto && el->port == port)
1801 			return (el);
1802 	}
1803 	return (NULL);
1804 }
1805 
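/*
 * Allocate a new VXLAN database entry.  mlx5_vzalloc() returns zeroed
 * memory, so "installed" starts out false and "vxlan_ft_rule" NULL.
 */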
1806 static struct mlx5e_vxlan_db_el *
1807 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1808 {
1809 	struct mlx5e_vxlan_db_el *el;
1810 
1811 	el = mlx5_vzalloc(sizeof(*el));
1812 	el->refcount = 1;
1813 	el->proto = proto;
1814 	el->port = port;
1815 	el->vxlan_ft_rule = NULL;
1816 	return (el);
1817 }
1818 
1819 static int
1820 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1821 {
1822 	switch (family) {
1823 	case AF_INET:
1824 		*proto = ETHERTYPE_IP;
1825 		return (0);
1826 	case AF_INET6:
1827 		*proto = ETHERTYPE_IPV6;
1828 		return (0);
1829 	default:
1830 		return (-EINVAL);
1831 	}
1832 }
1833 
1834 static int
1835 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1836     struct mlx5e_vxlan_db_el *el)
1837 {
1838 	u32 *match_criteria;
1839 	u32 *match_value;
1840 	int err;
1841 
1842 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1843 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1844 	if (match_value == NULL || match_criteria == NULL) {
1845 		mlx5_en_err(priv->ifp, "alloc failed\n");
1846 		err = -ENOMEM;
1847 		goto add_vxlan_rule_out;
1848 	}
1849 
1850 	err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
1851 
1852 add_vxlan_rule_out:
1853 	kvfree(match_criteria);
1854 	kvfree(match_value);
1855 
1856 	return (err);
1857 }
1858 
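/*
 * Add a VXLAN rule for the given address family and UDP port, or take
 * another reference on an existing database entry.  An entry may sit
 * in the database uninstalled while VXLAN checksum offloading is
 * disabled; mlx5e_add_all_vxlan_rules() installs it later.  A newly
 * allocated entry starts with refcount == 1, which is what the
 * insert/free logic below keys on.
 */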
1859 static int
1860 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1861 {
1862 	struct mlx5e_vxlan_db_el *el;
1863 	u_int proto;
1864 	int err;
1865 
1866 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1867 	if (err != 0)
1868 		return (err);
1869 
1870 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1871 	if (el != NULL) {
1872 		el->refcount++;
1873 		if (el->installed)
1874 			return (0);
1875 	} else
1876 		el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
1877 
1878 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
1879 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1880 		if (err == 0)
1881 			el->installed = true;
1882 	}
1883 	if (err == 0 && el->refcount == 1)	/* newly allocated entry */
1884 		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
1885 	else if (err != 0 && el->refcount == 1)
1886 		kvfree(el);
1887 
1888 	return (err);
1889 }
1890 
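/*
 * The catch-all rule uses an all-zero match criteria (mc_enable == 0),
 * so every packet that did not hit a port-specific VXLAN rule is
 * forwarded to the regular "main" flow table.
 */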
1891 static int
1892 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
1893 {
1894 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1895 	struct mlx5_flow_destination dest = {};
1896 	u8 mc_enable = 0;
1897 	struct mlx5_flow_rule **rule_p;
1898 	int err = 0;
1899 
1900 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1901 	dest.ft = priv->fts.main.t;
1902 
1903 	rule_p = &priv->fts.vxlan_catchall_ft_rule;
1904 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1905 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1906 
1907 	if (IS_ERR(*rule_p)) {
1908 		err = PTR_ERR(*rule_p);
1909 		*rule_p = NULL;
1910 		mlx5_en_err(priv->ifp, "add rule failed\n");
1911 	}
1912 
1913 	return (err);
1914 }
1915 
1917 static int
1918 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1919 {
1920 	u32 *match_criteria;
1921 	u32 *match_value;
1922 	int err;
1923 
1924 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1925 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1926 	if (match_value == NULL || match_criteria == NULL) {
1927 		mlx5_en_err(priv->ifp, "alloc failed\n");
1928 		err = -ENOMEM;
1929 		goto add_vxlan_rule_out;
1930 	}
1931 
1932 	err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
1933 	    match_value);
1934 
1935 add_vxlan_rule_out:
1936 	kvfree(match_criteria);
1937 	kvfree(match_value);
1938 
1939 	return (err);
1940 }
1941 
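/*
 * (Re)install hardware rules for all database entries that are not
 * currently backed by one, e.g. after VXLAN checksum offloading has
 * been switched on.
 */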
1942 int
1943 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1944 {
1945 	struct mlx5e_vxlan_db_el *el;
1946 	int err;
1947 
1948 	err = 0;
1949 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1950 		if (el->installed)
1951 			continue;
1952 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1953 		if (err != 0)
1954 			break;
1955 		el->installed = true;
1956 	}
1957 
1958 	return (err);
1959 }
1960 
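/*
 * Drop one reference on a VXLAN database entry; the hardware rule and
 * the entry itself are destroyed only when the last reference goes
 * away.
 */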
1961 static int
1962 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1963 {
1964 	struct mlx5e_vxlan_db_el *el;
1965 	u_int proto;
1966 	int err;
1967 
1968 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1969 	if (err != 0)
1970 		return (err);
1971 
1972 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1973 	if (el == NULL)
1974 		return (0);
1975 	if (el->refcount > 1) {
1976 		el->refcount--;
1977 		return (0);
1978 	}
1979 	if (el->installed)
1980 		mlx5_del_flow_rule(el->vxlan_ft_rule);
1981 	TAILQ_REMOVE(&priv->vxlan.head, el, link);
1982 	kvfree(el);
1983 	return (0);
1984 }
1985 
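/*
 * Uninstall the hardware rules but keep the database entries, so that
 * mlx5e_add_all_vxlan_rules() can reinstall them later.
 */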
1986 void
1987 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1988 {
1989 	struct mlx5e_vxlan_db_el *el;
1990 
1991 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1992 		if (!el->installed)
1993 			continue;
1994 		mlx5_del_flow_rule(el->vxlan_ft_rule);
1995 		el->installed = false;
1996 	}
1997 }
1998 
1999 static void
2000 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
2001 {
2002 	mlx5_del_flow_rule(priv->fts.vxlan_catchall_ft_rule);
2003 }
2004 
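/*
 * if_vxlan(4) start/stop notifications.  These callbacks are assumed
 * to be registered with the vxlan eventhandlers elsewhere in the
 * driver; an illustrative sketch only (tags and priority here are
 * hypothetical):
 *
 *	EVENTHANDLER_REGISTER(vxlan_start, mlx5e_vxlan_start, priv,
 *	    EVENTHANDLER_PRI_ANY);
 *	EVENTHANDLER_REGISTER(vxlan_stop, mlx5e_vxlan_stop, priv,
 *	    EVENTHANDLER_PRI_ANY);
 */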
2005 void
2006 mlx5e_vxlan_start(void *arg, struct ifnet *ifp __unused, sa_family_t family,
2007     u_int port)
2008 {
2009 	struct mlx5e_priv *priv = arg;
2010 	int err;
2011 
2012 	PRIV_LOCK(priv);
2013 	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
2014 	if (err == 0 && test_bit(MLX5E_STATE_OPENED, &priv->state))
2015 		mlx5e_add_vxlan_rule(priv, family, port);
2016 	PRIV_UNLOCK(priv);
2017 }
2018 
2019 void
2020 mlx5e_vxlan_stop(void *arg, struct ifnet *ifp __unused, sa_family_t family,
2021     u_int port)
2022 {
2023 	struct mlx5e_priv *priv = arg;
2024 
2025 	PRIV_LOCK(priv);
2026 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2027 		mlx5e_del_vxlan_rule(priv, family, port);
2028 	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
2029 	PRIV_UNLOCK(priv);
2030 }
2031 
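/*
 * Sizing of the VXLAN flow table: two flow groups, one holding the
 * port-specific rules and a single-entry catch-all group, so the
 * table size expands to
 *
 *	MLX5E_VXLAN_TABLE_SIZE = BIT(3) + BIT(0) = 8 + 1 = 9
 */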
2032 #define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
2033 #define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
2034 #define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
2035 #define	MLX5E_VXLAN_TABLE_SIZE	\
2036     (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
2037 
2038 static int
2039 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2040 				      int inlen)
2041 {
2042 	int err;
2043 	int ix = 0;
2044 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2045 
2046 	memset(in, 0, inlen);
2047 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
2048 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
2049 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
2050 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
2051 	MLX5_SET_CFG(in, start_flow_index, ix);
2052 	ix += MLX5E_VXLAN_GROUP0_SIZE;
2053 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2054 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2055 	if (IS_ERR(ft->g[ft->num_groups]))
2056 		goto err_destroy_groups;
2057 	ft->num_groups++;
2058 
2059 	memset(in, 0, inlen);
2060 	MLX5_SET_CFG(in, start_flow_index, ix);
2061 	ix += MLX5E_VXLAN_GROUP1_SIZE;
2062 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2063 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2064 	if (IS_ERR(ft->g[ft->num_groups]))
2065 		goto err_destroy_groups;
2066 	ft->num_groups++;
2067 
2068 	return (0);
2069 
2070 err_destroy_groups:
2071 	err = PTR_ERR(ft->g[ft->num_groups]);
2072 	ft->g[ft->num_groups] = NULL;
2073 	mlx5e_destroy_groups(ft);
2074 
2075 	return (err);
2076 }
2077 
2078 static int
2079 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
2080 {
2081 	u32 *in;
2082 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2083 	int err;
2084 
2085 	in = mlx5_vzalloc(inlen);
2086 	if (!in)
2087 		return (-ENOMEM);
2088 
2089 	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
2090 
2091 	kvfree(in);
2092 	return (err);
2093 }
2094 
2095 static int
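/*
 * Create the VXLAN flow table and initialize the VXLAN port database.
 * The shape mirrors mlx5e_create_vlan_flow_table() above.
 */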
2096 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2097 {
2098 	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2099 	int err;
2100 
2101 	ft->num_groups = 0;
2102 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
2103 				       MLX5E_VXLAN_TABLE_SIZE);
2104 
2105 	if (IS_ERR(ft->t)) {
2106 		err = PTR_ERR(ft->t);
2107 		ft->t = NULL;
2108 		return (err);
2109 	}
2110 	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2111 	if (!ft->g) {
2112 		err = -ENOMEM;
2113 		goto err_destroy_vxlan_flow_table;
2114 	}
2115 
2116 	err = mlx5e_create_vxlan_groups(ft);
2117 	if (err)
2118 		goto err_free_g;
2119 
2120 	TAILQ_INIT(&priv->vxlan.head);
2121 	return (0);
2122 
2123 err_free_g:
2124 	kfree(ft->g);
2125 
2126 err_destroy_vxlan_flow_table:
2127 	mlx5_destroy_flow_table(ft->t);
2128 	ft->t = NULL;
2129 
2130 	return (err);
2131 }
2132 
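/*
 * The inner RSS table classifies the inner headers of tunneled
 * packets: group 0 matches inner ethertype plus IP protocol, group 1
 * matches inner ethertype only, and group 2 is a single-entry
 * catch-all.
 */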
2133 #define MLX5E_NUM_INNER_RSS_GROUPS	3
2134 #define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
2135 #define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
2136 #define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
2137 #define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
2138 					 MLX5E_INNER_RSS_GROUP1_SIZE +\
2139 					 MLX5E_INNER_RSS_GROUP2_SIZE +\
2140 					 0)
2141 
2142 static int
2143 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2144 					   int inlen)
2145 {
2146 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2147 	int err;
2148 	int ix = 0;
2149 
2150 	memset(in, 0, inlen);
2151 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2152 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2153 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2154 	MLX5_SET_CFG(in, start_flow_index, ix);
2155 	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2156 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2157 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2158 	if (IS_ERR(ft->g[ft->num_groups]))
2159 		goto err_destroy_groups;
2160 	ft->num_groups++;
2161 
2162 	memset(in, 0, inlen);
2163 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2164 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2165 	MLX5_SET_CFG(in, start_flow_index, ix);
2166 	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2167 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2168 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2169 	if (IS_ERR(ft->g[ft->num_groups]))
2170 		goto err_destroy_groups;
2171 	ft->num_groups++;
2172 
2173 	memset(in, 0, inlen);
2174 	MLX5_SET_CFG(in, start_flow_index, ix);
2175 	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2176 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2177 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2178 	if (IS_ERR(ft->g[ft->num_groups]))
2179 		goto err_destroy_groups;
2180 	ft->num_groups++;
2181 
2182 	return (0);
2183 
2184 err_destroy_groups:
2185 	err = PTR_ERR(ft->g[ft->num_groups]);
2186 	ft->g[ft->num_groups] = NULL;
2187 	mlx5e_destroy_groups(ft);
2188 
2189 	return (err);
2190 }
2191 
2192 static int
2193 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2194 {
2195 	u32 *in;
2196 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2197 	int err;
2198 
2199 	in = mlx5_vzalloc(inlen);
2200 	if (!in)
2201 		return (-ENOMEM);
2202 
2203 	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
2204 
2205 	kvfree(in);
2206 	return (err);
2207 }
2208 
2209 static int
2210 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2211 {
2212 	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2213 	int err;
2214 
2215 	ft->num_groups = 0;
2216 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
2217 				       MLX5E_INNER_RSS_TABLE_SIZE);
2218 
2219 	if (IS_ERR(ft->t)) {
2220 		err = PTR_ERR(ft->t);
2221 		ft->t = NULL;
2222 		return (err);
2223 	}
2224 	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2225 			GFP_KERNEL);
2226 	if (!ft->g) {
2227 		err = -ENOMEM;
2228 		goto err_destroy_inner_rss_flow_table;
2229 	}
2230 
2231 	err = mlx5e_create_inner_rss_groups(ft);
2232 	if (err)
2233 		goto err_free_g;
2234 
2235 	return (0);
2236 
2237 err_free_g:
2238 	kfree(ft->g);
2239 
2240 err_destroy_inner_rss_flow_table:
2241 	mlx5_destroy_flow_table(ft->t);
2242 	ft->t = NULL;
2243 
2244 	return (err);
2245 }
2246 
2247 static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2248 {
2249 	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
2250 }
2251 
2252 static void
2253 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2254 {
2255 	mlx5e_destroy_flow_table(&priv->fts.vxlan);
2256 }
2257 
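/*
 * Create all receive flow tables and static steering rules.  The VLAN
 * and main tables always exist; the VXLAN, main-VXLAN and inner RSS
 * tables plus the static VXLAN rules are only created while VXLAN
 * checksum offloading is enabled.  On failure, everything created so
 * far is released in reverse order.
 */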
2258 int
2259 mlx5e_open_flow_table(struct mlx5e_priv *priv)
2260 {
2261 	int err;
2262 
2263 	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
2264 					       MLX5_FLOW_NAMESPACE_KERNEL);
2265 
2266 	err = mlx5e_create_vlan_flow_table(priv);
2267 	if (err)
2268 		return (err);
2269 
2270 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
2271 		err = mlx5e_create_vxlan_flow_table(priv);
2272 		if (err)
2273 			goto err_destroy_vlan_flow_table;
2274 	}
2275 
2276 	err = mlx5e_create_main_flow_table(priv, false);
2277 	if (err)
2278 		goto err_destroy_vxlan_flow_table;
2279 
2280 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
2281 		err = mlx5e_create_main_flow_table(priv, true);
2282 		if (err)
2283 			goto err_destroy_main_flow_table;
2284 
2285 		err = mlx5e_create_inner_rss_flow_table(priv);
2286 		if (err)
2287 			goto err_destroy_main_vxlan_flow_table;
2288 
2289 		err = mlx5e_add_vxlan_catchall_rule(priv);
2290 		if (err != 0)
2291 			goto err_destroy_inner_rss_flow_table;
2292 
2293 		err = mlx5e_add_main_vxlan_rules(priv);
2294 		if (err != 0)
2295 			goto err_destroy_vxlan_catchall_rule;
2296 	}
2297 
2298 	return (0);
2299 
2300 err_destroy_vxlan_catchall_rule:
2301 	mlx5e_del_vxlan_catchall_rule(priv);
2302 err_destroy_inner_rss_flow_table:
2303 	mlx5e_destroy_inner_rss_flow_table(priv);
2304 err_destroy_main_vxlan_flow_table:
2305 	mlx5e_destroy_main_vxlan_flow_table(priv);
2306 err_destroy_main_flow_table:
2307 	mlx5e_destroy_main_flow_table(priv);
2308 err_destroy_vxlan_flow_table:
2309 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
2310 		mlx5e_destroy_vxlan_flow_table(priv);
2311 err_destroy_vlan_flow_table:
2312 	mlx5e_destroy_vlan_flow_table(priv);
2313 
2314 	return (err);
2315 }
2316 
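/*
 * Tear down everything created by mlx5e_open_flow_table(), deleting
 * the static rules and destroying the flow tables.
 */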
2317 void
2318 mlx5e_close_flow_table(struct mlx5e_priv *priv)
2319 {
2320 
2321 	mlx5e_handle_ifp_addr(priv);
2322 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
2323 		mlx5e_destroy_inner_rss_flow_table(priv);
2324 		mlx5e_del_vxlan_catchall_rule(priv);
2325 		mlx5e_destroy_vxlan_flow_table(priv);
2326 		mlx5e_del_main_vxlan_rules(priv);
2327 	}
2328 	mlx5e_destroy_main_flow_table(priv);
2329 	if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
2330 		mlx5e_destroy_main_vxlan_flow_table(priv);
2331 	mlx5e_destroy_vlan_flow_table(priv);
2332 }
2333