xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 031beb4e239bfce798af17f5fe8dba8bcaf13d99)
/*-
 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/fs_tcp.h>

/*
 * The flow tables and their rules define the packet processing on
 * receive.  Currently the following structure is set up to handle
 * different offloads, such as TLS RX offload, VLAN decapsulation,
 * packet classification, RSS hashing and VxLAN checksum offloading:
 *
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv4 |       |TCP/IPv4 |      |TCP/IPv4 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |TCP/IPv6 |       |TCP/IPv6 |      |TCP/IPv6 Match   |
 *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
 *   |         |       |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *       +------------------------+
 *       V
 *   +=========+       +=========+      +=================+
 *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
 *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
 *   |VID/noVID|/      |Catch-all|\     |                 |
 *   +=========+       +=========+|     +=================+
 *                                |
 *                                |
 *                                |
 *                                v
 *                      +=================+
 *                      |Main             |
 *                      |Outer Proto Match|=====> TIR n
 *                      |                 |
 *                      +=================+
 *
 * The path through the flow rules directs each packet into an
 * appropriate TIR, according to:
 * - VLAN encapsulation
 * - Outer protocol
 * - Presence of an inner protocol
 */

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);

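/*
 * Hash an Ethernet address into the unicast/multicast address tables.
 * Only the least significant byte of the address is used as the hash
 * key, which presumes MLX5E_ETH_ADDR_HASH_SIZE covers the full byte
 * range.
 */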
static inline int
mlx5e_hash_eth_addr(const u8 * addr)
{
	return (addr[5]);
}

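/*
 * Add a new address node to the given hash list, unless an entry with
 * the same MAC address already exists.  In the latter case a pending
 * delete action is cancelled, the new node is freed and false is
 * returned; true is returned when the node was actually inserted.
 */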
static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
	mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
}

static int
mlx5e_get_eth_addr_type(const u8 * addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

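/*
 * Compute the set of traffic types (a bitmask of MLX5E_TT_* values)
 * which must get a flow rule for the given address type: a full MAC
 * match only steers the protocols relevant for that address class,
 * while allmulti and promiscuous mode cover all protocols.
 */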
static	u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

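/*
 * Install one flow rule per traffic type returned by
 * mlx5e_get_tt_vec() into the main flow table, steering each match to
 * the corresponding TIR.  The match criteria ("mc") and match value
 * ("mv") buffers are reused and refined step by step: first the
 * destination MAC, then the ethertype and finally the IP protocol.
 * On failure all rules added so far for this address are removed.
 */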
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
	mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
}

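/*
 * Populate the inner-header classification rules of the VxLAN main
 * flow table.  This mirrors mlx5e_add_eth_addr_rule_sub(), except that
 * matching is done on the inner (decapsulated) headers and traffic is
 * steered to the inner-packet TIRs.  The final rule uses empty match
 * criteria, i.e. it catches any remaining packet, and steers it to
 * the TT_ANY TIR.
 */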
static int
mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
	u32 *tirn = priv->tirn_inner_vxlan;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	mc_enable = MLX5_MATCH_INNER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
	dest.tir_num = tirn[MLX5E_TT_IPV4];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
	dest.tir_num = tirn[MLX5E_TT_IPV6];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
	MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
		 ETHERTYPE_IPV6);
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	mc_enable = 0;
	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
	dest.tir_num = tirn[MLX5E_TT_ANY];
	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(*rule_p))
		goto err_del_ai;

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_main_vxlan_rules(priv);

	return (err);
}

static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (match_value == NULL || match_criteria == NULL) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_main_vxlan_rules_out;
	}
	err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);

add_main_vxlan_rules_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

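/*
 * Mirror the set of currently active VLAN IDs into the NIC vport
 * context.  If more VLANs are active than fit in the device list
 * (log_max_vlan_list), the list is truncated and a warning is logged.
 */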
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	if_t ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

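/*
 * Install a single rule into the VLAN flow table.  The untagged,
 * any-CTAG and any-STAG rules match only on the presence (or absence)
 * of a VLAN tag, while MATCH_VID rules additionally match the first
 * VLAN ID.  All matches forward to the VxLAN flow table for further
 * classification.
 */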
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.vxlan.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (if_getflags(priv->ifp) & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (if_getflags(priv->ifp) & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

static int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

static void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

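/* Iterate over all hash buckets; safe against removal of the current node. */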
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

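/*
 * Apply the pending action of a single address hash node: install the
 * flow rules for a newly added address, or tear down the rules, the
 * MPFS entry and the node itself for an address queued for deletion.
 */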
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

struct mlx5e_copy_addr_ctx {
	struct mlx5e_eth_addr_hash_head *free;
	struct mlx5e_eth_addr_hash_head *fill;
	bool success;
};

static u_int
mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx5e_copy_addr_ctx *ctx = arg;
	struct mlx5e_eth_addr_hash_node *hn;

	hn = mlx5e_move_hn(ctx->free, ctx->fill);
	if (hn == NULL) {
		ctx->success = false;
		return (0);
	}
	ether_addr_copy(hn->ai.addr, LLADDR(sdl));

	return (1);
}

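/*
 * Snapshot the interface's unicast and multicast address lists into
 * the driver's address hash tables.  Placeholder nodes are allocated
 * up front, apparently because the ifnet address lists are walked
 * under a lock where sleeping allocations are not allowed; if the
 * lists grew in the meantime the whole snapshot is discarded and
 * retried with a larger allocation.
 */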
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_copy_addr_ctx ctx;
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	if_t ifp = priv->ifp;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

retry:
	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	MPASS(hn != NULL);

	ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));

	ctx.free = &head_free;
	ctx.fill = &head_uc;
	ctx.success = true;
	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	ctx.fill = &head_mc;
	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (ctx.success == false)
		goto retry;
}

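/*
 * Flatten one of the address hash tables into a linear MAC array for
 * the firmware.  For the unicast list the interface's own address
 * always goes first; for the multicast list the broadcast address
 * leads when broadcast reception is enabled.
 */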
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	if_t ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (rx_mode_enable)
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

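/*
 * Reconcile the hardware receive filters with the current interface
 * state: compare the desired promiscuous/allmulti/broadcast settings
 * against what is currently programmed, add or remove the matching
 * catch-all rules, resynchronize the L2 address tables and finally
 * update the NIC vport context.
 */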
static void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	if_t ndev = priv->ifp;
	int ndev_flags = if_getflags(ndev);

	bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    if_getbroadcastaddr(priv->ifp));

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv, rx_mode_enable);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
		mlx5e_set_rx_mode_core(priv, true);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

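/*
 * Create the flow groups of the main table.  Flow groups must not
 * overlap and are allocated back to back inside the table, so the
 * start/end indices below simply accumulate the group sizes above
 * (16 + 8 + 2 + 1 + 16384 + 8192 + 2048 + 4 + 2 + 1 = 26658 entries
 * in total).  Groups with more specific match criteria come first.
 */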
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

1415 
1416 static int
1417 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1418 {
1419 	u32 *in;
1420 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1421 	int err;
1422 
1423 	in = mlx5_vzalloc(inlen);
1424 	if (!in)
1425 		return (-ENOMEM);
1426 
1427 	err = mlx5e_create_main_groups_sub(ft, in, inlen);
1428 
1429 	kvfree(in);
1430 	return (err);
1431 }
1432 
1433 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
1434 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
1435 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
1436 static int
1437 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1438     int inlen)
1439 {
1440 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1441 	int err;
1442 	int ix = 0;
1443 
1444 	memset(in, 0, inlen);
1445 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1446 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1447 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1448 	MLX5_SET_CFG(in, start_flow_index, ix);
1449 	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1450 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1451 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1452 	if (IS_ERR(ft->g[ft->num_groups]))
1453 		goto err_destory_groups;
1454 	ft->num_groups++;
1455 
1456 	memset(in, 0, inlen);
1457 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1458 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1459 	MLX5_SET_CFG(in, start_flow_index, ix);
1460 	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1461 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1462 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1463 	if (IS_ERR(ft->g[ft->num_groups]))
1464 		goto err_destory_groups;
1465 	ft->num_groups++;
1466 
1467 	memset(in, 0, inlen);
1468 	MLX5_SET_CFG(in, start_flow_index, ix);
1469 	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1470 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1471 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1472 	if (IS_ERR(ft->g[ft->num_groups]))
1473 		goto err_destory_groups;
1474 	ft->num_groups++;
1475 
1476 	return (0);
1477 
1478 err_destory_groups:
1479 	err = PTR_ERR(ft->g[ft->num_groups]);
1480 	ft->g[ft->num_groups] = NULL;
1481 	mlx5e_destroy_groups(ft);
1482 
1483 	return (err);
1484 }
1485 
1486 static int
1487 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1488 {
1489 	u32 *in;
1490 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1491 	int err;
1492 
1493 	in = mlx5_vzalloc(inlen);
1494 	if (!in)
1495 		return (-ENOMEM);
1496 
1497 	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
1498 
1499 	kvfree(in);
1500 	return (err);
1501 }
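/*
 * Create either the outer-header main flow table or its VxLAN
 * inner-header twin.  Both share MLX5E_MAIN_TABLE_SIZE and the same
 * group array size; only the group layout and the table name differ.
 */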
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
{
	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
	    &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
	    inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
	    mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

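/*
 * Create the VLAN table flow groups: 4096 entries matching C-TAG plus
 * VID (one per possible VLAN), two entries matching on the C-TAG
 * presence bit alone and one matching the S-TAG presence bit, for a
 * total of 4096 + 2 + 1 = 4099 entries.
 */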
static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

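/*
 * Install a rule into the VxLAN flow table matching one known VxLAN
 * UDP destination port and outer ethertype; matching packets proceed
 * to the inner-header main table for classification on the
 * encapsulated packet.
 */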
1673 static int
1674 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1675     struct mlx5e_vxlan_db_el *el)
1676 {
1677 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1678 	struct mlx5_flow_destination dest = {};
1679 	u8 mc_enable;
1680 	struct mlx5_flow_rule **rule_p;
1681 	int err = 0;
1682 
1683 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1684 	dest.ft = priv->fts.main_vxlan.t;
1685 
1686 	mc_enable = MLX5_MATCH_OUTER_HEADERS;
1687 	rule_p = &el->vxlan_ft_rule;
1688 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1689 	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1690 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1691 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1692 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1693 	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1694 
1695 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1696 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1697 
1698 	if (IS_ERR(*rule_p)) {
1699 		err = PTR_ERR(*rule_p);
1700 		*rule_p = NULL;
1701 		mlx5_en_err(priv->ifp, "add rule failed\n");
1702 	}
1703 
1704 	return (err);
1705 }
1706 
1707 static struct mlx5e_vxlan_db_el *
1708 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1709 {
1710 	struct mlx5e_vxlan_db_el *el;
1711 
1712 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1713 		if (el->proto == proto && el->port == port)
1714 			return (el);
1715 	}
1716 	return (NULL);
1717 }
1718 
1719 static struct mlx5e_vxlan_db_el *
1720 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1721 {
1722 	struct mlx5e_vxlan_db_el *el;
1723 
1724 	el = mlx5_vzalloc(sizeof(*el));
1725 	el->refcount = 1;
1726 	el->proto = proto;
1727 	el->port = port;
1728 	el->vxlan_ft_rule = NULL;
1729 	return (el);
1730 }
1731 
1732 static int
1733 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1734 {
1735 	switch (family) {
1736 	case AF_INET:
1737 		*proto = ETHERTYPE_IP;
1738 		return (0);
1739 	case AF_INET6:
1740 		*proto = ETHERTYPE_IPV6;
1741 		return (0);
1742 	default:
1743 		return (-EINVAL);
1744 	}
1745 }
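/*
 * Usage sketch for the mapping above (fragment, illustration only);
 * AF_INET and AF_INET6 are the only supported families.
 */
#if 0
	u_int proto;

	if (mlx5e_vxlan_family_to_proto(AF_INET, &proto) == 0)
		KASSERT(proto == ETHERTYPE_IP, ("bad AF_INET mapping"));
#endif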
1746 
1747 static int
1748 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1749     struct mlx5e_vxlan_db_el *el)
1750 {
1751 	u32 *match_criteria;
1752 	u32 *match_value;
1753 	int err;
1754 
1755 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1756 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1757 	if (match_value == NULL || match_criteria == NULL) {
1758 		mlx5_en_err(priv->ifp, "alloc failed\n");
1759 		err = -ENOMEM;
1760 		goto add_vxlan_rule_out;
1761 	}
1762 
1763 	err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
1764 
1765 add_vxlan_rule_out:
1766 	kvfree(match_criteria);
1767 	kvfree(match_value);
1768 
1769 	return (err);
1770 }
1771 
1772 static int
1773 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1774 {
1775 	struct mlx5e_vxlan_db_el *el;
1776 	u_int proto;
1777 	int err;
1778 
1779 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1780 	if (err != 0)
1781 		return (err);
1782 
1783 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1784 	if (el != NULL) {
1785 		el->refcount++;
1786 		if (el->installed)
1787 			return (0);
1788 	} else
1789 		el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
1790 
1791 	if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
1792 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1793 		if (err == 0)
1794 			el->installed = true;
1795 	}
1796 	if (err == 0)
1797 		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
1798 	else
1799 		kvfree(el);
1800 
1801 	return (err);
1802 }
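/*
 * The database elements are reference counted per (proto, port) pair, so
 * several VXLAN interfaces sharing one UDP port share a single steering
 * rule.  Expected lifecycle for the standard VXLAN port, as implied by
 * this function and mlx5e_del_vxlan_rule() below:
 *
 *	add(AF_INET, 4789)	el allocated, refcount = 1, rule installed
 *	add(AF_INET, 4789)	el found, refcount = 2, rule shared
 *	del(AF_INET, 4789)	refcount = 1, rule kept
 *	del(AF_INET, 4789)	rule deleted, el removed and freed
 */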
1803 
1804 static int
1805 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
1806 {
1807 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1808 	struct mlx5_flow_destination dest = {};
1809 	u8 mc_enable = 0;
1810 	struct mlx5_flow_rule **rule_p;
1811 	int err = 0;
1812 
1813 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1814 	dest.ft = priv->fts.main.t;
1815 
1816 	rule_p = &priv->fts.vxlan_catchall_ft_rule;
1817 	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1818 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1819 
1820 	if (IS_ERR(*rule_p)) {
1821 		err = PTR_ERR(*rule_p);
1822 		*rule_p = NULL;
1823 		mlx5_en_err(priv->ifp, "add rule failed\n");
1824 	}
1825 
1826 	return (err);
1827 }
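/*
 * With mc_enable == 0 and both match buffers left zeroed, the entry
 * above is a true catch-all: any packet not already claimed by a
 * specific (EtherType, UDP port) rule in the first group is forwarded
 * to the ordinary main flow table rather than the VXLAN variant.
 */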
1828 
1830 static int
1831 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1832 {
1833 	u32 *match_criteria;
1834 	u32 *match_value;
1835 	int err;
1836 
1837 	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1838 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1839 	if (match_value == NULL || match_criteria == NULL) {
1840 		mlx5_en_err(priv->ifp, "alloc failed\n");
1841 		err = -ENOMEM;
1842 		goto add_vxlan_rule_out;
1843 	}
1844 
1845 	err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
1846 	    match_value);
1847 
1848 add_vxlan_rule_out:
1849 	kvfree(match_criteria);
1850 	kvfree(match_value);
1851 
1852 	return (err);
1853 }
1854 
1855 int
1856 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1857 {
1858 	struct mlx5e_vxlan_db_el *el;
1859 	int err;
1860 
1861 	err = 0;
1862 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1863 		if (el->installed)
1864 			continue;
1865 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1866 		if (err != 0)
1867 			break;
1868 		el->installed = true;
1869 	}
1870 
1871 	return (err);
1872 }
1873 
1874 static int
1875 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1876 {
1877 	struct mlx5e_vxlan_db_el *el;
1878 	u_int proto;
1879 	int err;
1880 
1881 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1882 	if (err != 0)
1883 		return (err);
1884 
1885 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1886 	if (el == NULL)
1887 		return (0);
1888 	if (el->refcount > 1) {
1889 		el->refcount--;
1890 		return (0);
1891 	}
1892 
1893 	if (el->installed)
1894 		mlx5_del_flow_rule(&el->vxlan_ft_rule);
1895 	TAILQ_REMOVE(&priv->vxlan.head, el, link);
1896 	kvfree(el);
1897 	return (0);
1898 }
1899 
1900 void
1901 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1902 {
1903 	struct mlx5e_vxlan_db_el *el;
1904 
1905 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1906 		if (!el->installed)
1907 			continue;
1908 		mlx5_del_flow_rule(&el->vxlan_ft_rule);
1909 		el->installed = false;
1910 	}
1911 }
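/*
 * Unlike mlx5e_del_vxlan_rule(), this loop only removes the hardware
 * rules and clears "installed"; the database elements and their
 * reference counts survive, so mlx5e_add_all_vxlan_rules() can
 * reinstall the same set when the flow rules are reopened.
 */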
1912 
1913 static void
1914 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1915 {
1916 	mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
1917 }
1918 
1919 void
1920 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1921     u_int port)
1922 {
1923 	struct mlx5e_priv *priv = arg;
1924 	int err;
1925 
1926 	PRIV_LOCK(priv);
1927 	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1928 	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1929 		mlx5e_add_vxlan_rule(priv, family, port);
1930 	PRIV_UNLOCK(priv);
1931 }
1932 
1933 void
1934 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1935     u_int port)
1936 {
1937 	struct mlx5e_priv *priv = arg;
1938 
1939 	PRIV_LOCK(priv);
1940 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1941 		mlx5e_del_vxlan_rule(priv, family, port);
1942 	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
1943 	PRIV_UNLOCK(priv);
1944 }
1945 
1946 #define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
1947 #define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
1948 #define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
1949 #define	MLX5E_VXLAN_TABLE_SIZE	\
1950     (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
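/*
 * BIT(n) expands to (1UL << n), so group 0 reserves eight entries for
 * the specific EtherType/UDP-port rules and group 1 holds the single
 * catch-all, for a nine-entry table.  A compile-time cross-check could
 * read (illustration only):
 */
#if 0
CTASSERT(MLX5E_VXLAN_TABLE_SIZE == 8 + 1);
#endif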
1951 
1952 static int
1953 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1954 				      int inlen)
1955 {
1956 	int err;
1957 	int ix = 0;
1958 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1959 
1960 	memset(in, 0, inlen);
1961 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1962 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1963 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1964 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1965 	MLX5_SET_CFG(in, start_flow_index, ix);
1966 	ix += MLX5E_VXLAN_GROUP0_SIZE;
1967 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1968 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1969 	if (IS_ERR(ft->g[ft->num_groups]))
1970 		goto err_destroy_groups;
1971 	ft->num_groups++;
1972 
1973 	memset(in, 0, inlen);
1974 	MLX5_SET_CFG(in, start_flow_index, ix);
1975 	ix += MLX5E_VXLAN_GROUP1_SIZE;
1976 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1977 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1978 	if (IS_ERR(ft->g[ft->num_groups]))
1979 		goto err_destroy_groups;
1980 	ft->num_groups++;
1981 
1982 	return (0);
1983 
1984 err_destroy_groups:
1985 	err = PTR_ERR(ft->g[ft->num_groups]);
1986 	ft->g[ft->num_groups] = NULL;
1987 	mlx5e_destroy_groups(ft);
1988 
1989 	return (err);
1990 }
1991 
1992 static int
1993 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
1994 {
1995 	u32 *in;
1996 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1997 	int err;
1998 
1999 	in = mlx5_vzalloc(inlen);
2000 	if (!in)
2001 		return (-ENOMEM);
2002 
2003 	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
2004 
2005 	kvfree(in);
2006 	return (err);
2007 }
2008 
2009 static int
2010 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2011 {
2012 	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2013 	int err;
2014 
2015 	ft->num_groups = 0;
2016 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
2017 				       MLX5E_VXLAN_TABLE_SIZE);
2018 
2019 	if (IS_ERR(ft->t)) {
2020 		err = PTR_ERR(ft->t);
2021 		ft->t = NULL;
2022 		return (err);
2023 	}
2024 	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2025 	if (!ft->g) {
2026 		err = -ENOMEM;
2027 		goto err_destroy_vxlan_flow_table;
2028 	}
2029 
2030 	err = mlx5e_create_vxlan_groups(ft);
2031 	if (err)
2032 		goto err_free_g;
2033 
2034 	TAILQ_INIT(&priv->vxlan.head);
2035 	return (0);
2036 
2037 err_free_g:
2038 	kfree(ft->g);
2039 
2040 err_destroy_vxlan_flow_table:
2041 	mlx5_destroy_flow_table(ft->t);
2042 	ft->t = NULL;
2043 
2044 	return (err);
2045 }
2046 
2047 #define MLX5E_NUM_INNER_RSS_GROUPS	3
2048 #define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
2049 #define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
2050 #define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
2051 #define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
2052 					 MLX5E_INNER_RSS_GROUP1_SIZE +\
2053 					 MLX5E_INNER_RSS_GROUP2_SIZE +\
2054 					 0)
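/*
 * The inner RSS table thus holds 8 + 2 + 1 = 11 entries; the trailing
 * "+ 0" only keeps each real term on its own continuation line.
 * Mirroring the VXLAN cross-check (illustration only):
 */
#if 0
CTASSERT(MLX5E_INNER_RSS_TABLE_SIZE == 11);
#endif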
2055 
2056 static int
2057 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2058 					   int inlen)
2059 {
2060 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2061 	int err;
2062 	int ix = 0;
2063 
2064 	memset(in, 0, inlen);
2065 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2066 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2067 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2068 	MLX5_SET_CFG(in, start_flow_index, ix);
2069 	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2070 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2071 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2072 	if (IS_ERR(ft->g[ft->num_groups]))
2073 		goto err_destroy_groups;
2074 	ft->num_groups++;
2075 
2076 	memset(in, 0, inlen);
2077 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2078 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2079 	MLX5_SET_CFG(in, start_flow_index, ix);
2080 	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2081 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2082 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2083 	if (IS_ERR(ft->g[ft->num_groups]))
2084 		goto err_destroy_groups;
2085 	ft->num_groups++;
2086 
2087 	memset(in, 0, inlen);
2088 	MLX5_SET_CFG(in, start_flow_index, ix);
2089 	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2090 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2091 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2092 	if (IS_ERR(ft->g[ft->num_groups]))
2093 		goto err_destroy_groups;
2094 	ft->num_groups++;
2095 
2096 	return (0);
2097 
2098 err_destroy_groups:
2099 	err = PTR_ERR(ft->g[ft->num_groups]);
2100 	ft->g[ft->num_groups] = NULL;
2101 	mlx5e_destroy_groups(ft);
2102 
2103 	return (err);
2104 }
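/*
 * The three groups above run from most to least specific: inner
 * ethertype plus IP protocol, inner ethertype only, then a catch-all.
 * Flow entries are matched in increasing flow-index order, so placing
 * the narrower groups at the lower indices is what gives their rules
 * priority over the catch-all.
 */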
2105 
2106 static int
2107 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2108 {
2109 	u32 *in;
2110 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2111 	int err;
2112 
2113 	in = mlx5_vzalloc(inlen);
2114 	if (!in)
2115 		return (-ENOMEM);
2116 
2117 	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
2118 
2119 	kvfree(in);
2120 	return (err);
2121 }
2122 
2123 static int
2124 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2125 {
2126 	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2127 	int err;
2128 
2129 	ft->num_groups = 0;
2130 	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
2131 				       MLX5E_INNER_RSS_TABLE_SIZE);
2132 
2133 	if (IS_ERR(ft->t)) {
2134 		err = PTR_ERR(ft->t);
2135 		ft->t = NULL;
2136 		return (err);
2137 	}
2138 	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2139 			GFP_KERNEL);
2140 	if (!ft->g) {
2141 		err = -ENOMEM;
2142 		goto err_destroy_inner_rss_flow_table;
2143 	}
2144 
2145 	err = mlx5e_create_inner_rss_groups(ft);
2146 	if (err)
2147 		goto err_free_g;
2148 
2149 	return (0);
2150 
2151 err_free_g:
2152 	kfree(ft->g);
2153 
2154 err_destroy_inner_rss_flow_table:
2155 	mlx5_destroy_flow_table(ft->t);
2156 	ft->t = NULL;
2157 
2158 	return (err);
2159 }
2160 
2161 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2162 {
2163 	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
2164 }
2165 
2166 static void
2167 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2168 {
2169 	mlx5e_destroy_flow_table(&priv->fts.vxlan);
2170 }
2171 
2172 int
2173 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2174 {
2175 	int err;
2176 
2177 	/* setup namespace pointer */
2178 	priv->fts.ns = mlx5_get_flow_namespace(
2179 	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2180 
2181 	err = mlx5e_create_vlan_flow_table(priv);
2182 	if (err)
2183 		return (err);
2184 
2185 	err = mlx5e_create_vxlan_flow_table(priv);
2186 	if (err)
2187 		goto err_destroy_vlan_flow_table;
2188 
2189 	err = mlx5e_create_main_flow_table(priv, true);
2190 	if (err)
2191 		goto err_destroy_vxlan_flow_table;
2192 
2193 	err = mlx5e_create_inner_rss_flow_table(priv);
2194 	if (err)
2195 		goto err_destroy_main_flow_table_true;
2196 
2197 	err = mlx5e_create_main_flow_table(priv, false);
2198 	if (err)
2199 		goto err_destroy_inner_rss_flow_table;
2200 
2201 	err = mlx5e_add_vxlan_catchall_rule(priv);
2202 	if (err)
2203 		goto err_destroy_main_flow_table_false;
2204 
2205 	err = mlx5e_accel_fs_tcp_create(priv);
2206 	if (err)
2207 		goto err_del_vxlan_catchall_rule;
2208 
2209 	return (0);
2210 
2211 err_del_vxlan_catchall_rule:
2212 	mlx5e_del_vxlan_catchall_rule(priv);
2213 err_destroy_main_flow_table_false:
2214 	mlx5e_destroy_main_flow_table(priv);
2215 err_destroy_inner_rss_flow_table:
2216 	mlx5e_destroy_inner_rss_flow_table(priv);
2217 err_destroy_main_flow_table_true:
2218 	mlx5e_destroy_main_vxlan_flow_table(priv);
2219 err_destroy_vxlan_flow_table:
2220 	mlx5e_destroy_vxlan_flow_table(priv);
2221 err_destroy_vlan_flow_table:
2222 	mlx5e_destroy_vlan_flow_table(priv);
2223 
2224 	return (err);
2225 }
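/*
 * The error labels unwind in the exact reverse order of creation:
 * catch-all rule, main (non-VXLAN) table, inner RSS table, main VXLAN
 * table, VXLAN table, VLAN table.  The "_true"/"_false" suffixes track
 * the boolean passed to mlx5e_create_main_flow_table(), which selects
 * the VXLAN variant of the main table.
 */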
2226 
2227 void
2228 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2229 {
2230 	mlx5e_accel_fs_tcp_destroy(priv);
2231 	mlx5e_del_vxlan_catchall_rule(priv);
2232 	mlx5e_destroy_main_flow_table(priv);
2233 	mlx5e_destroy_inner_rss_flow_table(priv);
2234 	mlx5e_destroy_main_vxlan_flow_table(priv);
2235 	mlx5e_destroy_vxlan_flow_table(priv);
2236 	mlx5e_destroy_vlan_flow_table(priv);
2237 }
2238 
2239 int
2240 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2241 {
2242 	int err;
2243 
2244 	err = mlx5e_add_all_vlan_rules(priv);
2245 	if (err)
2246 		return (err);
2247 
2248 	err = mlx5e_add_main_vxlan_rules(priv);
2249 	if (err)
2250 		goto err_del_all_vlan_rules;
2251 
2252 	err = mlx5e_add_all_vxlan_rules(priv);
2253 	if (err)
2254 		goto err_del_main_vxlan_rules;
2255 
2256 	mlx5e_set_rx_mode_core(priv, true);
2257 
2258 	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2259 
2260 	return (0);
2261 
2262 err_del_main_vxlan_rules:
2263 	mlx5e_del_main_vxlan_rules(priv);
2264 
2265 err_del_all_vlan_rules:
2266 	mlx5e_del_all_vlan_rules(priv);
2267 
2268 	return (err);
2269 }
2270 
2271 void
2272 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2273 {
2274 	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2275 
2276 	mlx5e_set_rx_mode_core(priv, false);
2277 	mlx5e_del_all_vxlan_rules(priv);
2278 	mlx5e_del_main_vxlan_rules(priv);
2279 	mlx5e_del_all_vlan_rules(priv);
2280 }
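/*
 * Teardown mirrors mlx5e_open_flow_rules() in reverse.  Clearing
 * MLX5E_STATE_FLOW_RULES_READY first keeps the VXLAN start/stop
 * callbacks from touching the steering rules while they are being
 * removed; those callbacks take PRIV_LOCK() before testing the bit.
 */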
2281