xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 357378bbdedf24ce2b90e9bd831af4a9db3ec70a)
1 /*-
2  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28 
29 #include <dev/mlx5/mlx5_en/en.h>
30 
31 #include <linux/list.h>
32 #include <dev/mlx5/fs.h>
33 #include <dev/mlx5/mpfs.h>
34 #include <dev/mlx5/mlx5_core/fs_tcp.h>
35 
36 /*
37  * The flow tables and their rules define how received packets are
38  * processed.  Currently the following structure is set up to handle
39  * different offloads, such as TLS RX offload, VLAN decapsulation,
40  * packet classification, RSS hashing and VXLAN checksum offloading:
41  *
42  *   +=========+       +=========+      +=================+
43  *   |TCP/IPv4 |       |TCP/IPv4 |      |TCP/IPv4 Match   |
44  *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
45  *   |         |       |Catch-all|\     |                 |
46  *   +=========+       +=========+|     +=================+
47  *                                |
48  *       +------------------------+
49  *       V
50  *   +=========+       +=========+      +=================+
51  *   |TCP/IPv6 |       |TCP/IPv6 |      |TCP/IPv6 Match   |
52  *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
53  *   |         |       |Catch-all|\     |                 |
54  *   +=========+       +=========+|     +=================+
55  *                                |
56  *       +------------------------+
57  *       V
58  *   +=========+       +=========+      +=================+
59  *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
60  *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
61  *   |VID/noVID|/      |Catch-all|\     |                 |
62  *   +=========+       +=========+|     +=================+
63  *                                |
64  *                                |
65  *                                |
66  *                                v
67  *                      +=================+
68  *                      |Main             |
69  *                      |Outer Proto Match|=====> TIR n
70  *                      |                 |
71  *                      +=================+
72  *
73  * The path through the flow rules directs each packet into an appropriate
74  * TIR, according to:
75  * - VLAN encapsulation
76  * - Outer protocol
77  * - Presence of an inner protocol
78  */
79 
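/*
 * Shorthand for setting fields in the create_flow_group_in command
 * layout; e.g. MLX5_SET_CFG(in, start_flow_index, ix) expands to
 * MLX5_SET(create_flow_group_in, in, start_flow_index, ix).
 */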
80 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
81 
82 enum {
83 	MLX5E_FULLMATCH = 0,
84 	MLX5E_ALLMULTI = 1,
85 	MLX5E_PROMISC = 2,
86 };
87 
88 enum {
89 	MLX5E_UC = 0,
90 	MLX5E_MC_IPV4 = 1,
91 	MLX5E_MC_IPV6 = 2,
92 	MLX5E_MC_OTHER = 3,
93 };
94 
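/* Deferred actions for entries in the L2 address hash lists. */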
95 enum {
96 	MLX5E_ACTION_NONE = 0,
97 	MLX5E_ACTION_ADD = 1,
98 	MLX5E_ACTION_DEL = 2,
99 };
100 
101 struct mlx5e_eth_addr_hash_node {
102 	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
103 	u8	action;
104 	u32	mpfs_index;
105 	struct mlx5e_eth_addr_info ai;
106 };
107 
108 static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
109 
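/*
 * Hash an Ethernet address by its least significant byte; the byte is
 * used directly as an index into the address hash lists.
 */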
110 static inline int
111 mlx5e_hash_eth_addr(const u8 *addr)
112 {
113 	return (addr[5]);
114 }
115 
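/*
 * Add a new address node to a hash list, unless the address is
 * already present.  Returns true if the node was inserted and false
 * if a duplicate was found, in which case the new node is freed and
 * any pending delete of the existing node is cancelled.
 */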
116 static bool
117 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
118     struct mlx5e_eth_addr_hash_node *hn_new)
119 {
120 	struct mlx5e_eth_addr_hash_node *hn;
121 	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);
122 
123 	LIST_FOREACH(hn, &hash[ix], hlist) {
124 		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
125 			if (hn->action == MLX5E_ACTION_DEL)
126 				hn->action = MLX5E_ACTION_NONE;
127 			free(hn_new, M_MLX5EN);
128 			return (false);
129 		}
130 	}
131 	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
132 	return (true);
133 }
134 
135 static void
136 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
137 {
138 	LIST_REMOVE(hn, hlist);
139 	free(hn, M_MLX5EN);
140 }
141 
142 static void
143 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
144     struct mlx5e_eth_addr_info *ai)
145 {
146 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
147 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
148 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
149 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
150 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
151 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
152 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
153 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
154 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6]);
155 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4]);
156 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_ANY]);
157 }
158 
159 static int
160 mlx5e_get_eth_addr_type(const u8 *addr)
161 {
162 	if (ETHER_IS_MULTICAST(addr) == 0)
163 		return (MLX5E_UC);
164 
165 	if ((addr[0] == 0x01) &&
166 	    (addr[1] == 0x00) &&
167 	    (addr[2] == 0x5e) &&
168 	    !(addr[3] & 0x80))
169 		return (MLX5E_MC_IPV4);
170 
171 	if ((addr[0] == 0x33) &&
172 	    (addr[1] == 0x33))
173 		return (MLX5E_MC_IPV6);
174 
175 	return (MLX5E_MC_OTHER);
176 }
177 
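/*
 * Compute the bitmask of traffic types (MLX5E_TT_*) for which flow
 * rules should be installed, based on the match type and, for a full
 * match, on the class of the Ethernet address.
 */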
178 static u32
179 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
180 {
181 	int eth_addr_type;
182 	u32 ret;
183 
184 	switch (type) {
185 	case MLX5E_FULLMATCH:
186 		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
187 		switch (eth_addr_type) {
188 		case MLX5E_UC:
189 			ret =
190 			    (1 << MLX5E_TT_IPV4_TCP) |
191 			    (1 << MLX5E_TT_IPV6_TCP) |
192 			    (1 << MLX5E_TT_IPV4_UDP) |
193 			    (1 << MLX5E_TT_IPV6_UDP) |
194 			    (1 << MLX5E_TT_IPV4) |
195 			    (1 << MLX5E_TT_IPV6) |
196 			    (1 << MLX5E_TT_ANY) |
197 			    0;
198 			break;
199 
200 		case MLX5E_MC_IPV4:
201 			ret =
202 			    (1 << MLX5E_TT_IPV4_UDP) |
203 			    (1 << MLX5E_TT_IPV4) |
204 			    0;
205 			break;
206 
207 		case MLX5E_MC_IPV6:
208 			ret =
209 			    (1 << MLX5E_TT_IPV6_UDP) |
210 			    (1 << MLX5E_TT_IPV6) |
211 			    0;
212 			break;
213 
214 		default:
215 			ret =
216 			    (1 << MLX5E_TT_ANY) |
217 			    0;
218 			break;
219 		}
220 		break;
221 
222 	case MLX5E_ALLMULTI:
223 		ret =
224 		    (1 << MLX5E_TT_IPV4_UDP) |
225 		    (1 << MLX5E_TT_IPV6_UDP) |
226 		    (1 << MLX5E_TT_IPV4) |
227 		    (1 << MLX5E_TT_IPV6) |
228 		    (1 << MLX5E_TT_ANY) |
229 		    0;
230 		break;
231 
232 	default:			/* MLX5E_PROMISC */
233 		ret =
234 		    (1 << MLX5E_TT_IPV4_TCP) |
235 		    (1 << MLX5E_TT_IPV6_TCP) |
236 		    (1 << MLX5E_TT_IPV4_UDP) |
237 		    (1 << MLX5E_TT_IPV6_UDP) |
238 		    (1 << MLX5E_TT_IPV4) |
239 		    (1 << MLX5E_TT_IPV6) |
240 		    (1 << MLX5E_TT_ANY) |
241 		    0;
242 		break;
243 	}
244 
245 	return (ret);
246 }
247 
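/*
 * Install one flow rule in the main table per traffic type selected
 * by mlx5e_get_tt_vec(), steering matching packets to the
 * corresponding TIR.  On failure, all rules already installed for
 * this address are removed again.
 */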
248 static int
249 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
250     struct mlx5e_eth_addr_info *ai, int type,
251     struct mlx5_flow_spec *spec)
252 {
253 	struct mlx5_flow_destination dest = {};
254 	u8 mc_enable = 0;
255 	struct mlx5_flow_handle **rule_p;
256 	struct mlx5_flow_table *ft = priv->fts.main.t;
257 	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
258 				   outer_headers.dmac_47_16);
259 	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
260 				   outer_headers.dmac_47_16);
261 	u32 *tirn = priv->tirn;
262 	u32 tt_vec;
263 	int err = 0;
264 	struct mlx5_flow_act flow_act = {
265 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
266 	};
267 	u8 *mc;
268 	u8 *mv;
269 
270 	mv = (u8 *)spec->match_value;
271 	mc = (u8 *)spec->match_criteria;
272 
273 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
274 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
275 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
276 
277 	switch (type) {
278 	case MLX5E_FULLMATCH:
279 		mc_enable = MLX5_MATCH_OUTER_HEADERS;
280 		memset(mc_dmac, 0xff, ETH_ALEN);
281 		ether_addr_copy(mv_dmac, ai->addr);
282 		break;
283 
284 	case MLX5E_ALLMULTI:
285 		mc_enable = MLX5_MATCH_OUTER_HEADERS;
286 		mc_dmac[0] = 0x01;
287 		mv_dmac[0] = 0x01;
288 		break;
289 
290 	case MLX5E_PROMISC:
291 		break;
292 	default:
293 		break;
294 	}
295 
296 	tt_vec = mlx5e_get_tt_vec(ai, type);
297 
298 	spec->match_criteria_enable = mc_enable;
299 	if (tt_vec & BIT(MLX5E_TT_ANY)) {
300 		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
301 		dest.tir_num = tirn[MLX5E_TT_ANY];
302 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
303 		if (IS_ERR_OR_NULL(*rule_p))
304 			goto err_del_ai;
305 	}
306 
307 	mc_enable = MLX5_MATCH_OUTER_HEADERS;
308 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
309 
310 	spec->match_criteria_enable = mc_enable;
311 	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
312 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
313 		dest.tir_num = tirn[MLX5E_TT_IPV4];
314 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
315 			 ETHERTYPE_IP);
316 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
317 		if (IS_ERR_OR_NULL(*rule_p))
318 			goto err_del_ai;
319 	}
320 
321 	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
322 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
323 		dest.tir_num = tirn[MLX5E_TT_IPV6];
324 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
325 			 ETHERTYPE_IPV6);
326 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
327 		if (IS_ERR_OR_NULL(*rule_p))
328 			goto err_del_ai;
329 	}
330 
331 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
332 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
333 
334 	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
335 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
336 		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
337 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
338 			 ETHERTYPE_IP);
339 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
340 		if (IS_ERR_OR_NULL(*rule_p))
341 			goto err_del_ai;
342 	}
343 
344 	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
345 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
346 		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
347 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
348 			 ETHERTYPE_IPV6);
349 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
350 		if (IS_ERR_OR_NULL(*rule_p))
351 			goto err_del_ai;
352 	}
353 
354 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
355 
356 	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
357 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
358 		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
359 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
360 			 ETHERTYPE_IP);
361 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
362 		if (IS_ERR_OR_NULL(*rule_p))
363 			goto err_del_ai;
364 	}
365 
366 	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
367 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
368 		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
369 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
370 			 ETHERTYPE_IPV6);
371 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
372 		if (IS_ERR_OR_NULL(*rule_p))
373 			goto err_del_ai;
374 	}
375 
376 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
377 
378 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
379 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
380 		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
381 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
382 			 ETHERTYPE_IP);
383 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
384 		if (IS_ERR_OR_NULL(*rule_p))
385 			goto err_del_ai;
386 	}
387 
388 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
389 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
390 		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
391 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
392 			 ETHERTYPE_IPV6);
393 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
394 		if (IS_ERR_OR_NULL(*rule_p))
395 			goto err_del_ai;
396 	}
397 
398 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
399 
400 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
401 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
402 		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
403 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
404 			 ETHERTYPE_IP);
405 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
406 		if (IS_ERR_OR_NULL(*rule_p))
407 			goto err_del_ai;
408 	}
409 
410 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
411 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
412 		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
413 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
414 			 ETHERTYPE_IPV6);
415 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
416 		if (IS_ERR_OR_NULL(*rule_p))
417 			goto err_del_ai;
418 	}
419 
420 	return (0);
421 
422 err_del_ai:
423 	err = IS_ERR(*rule_p) ? PTR_ERR(*rule_p) : -ENOMEM;
424 	*rule_p = NULL;
425 	mlx5e_del_eth_addr_from_flow_table(priv, ai);
426 
427 	return (err);
428 }
429 
430 static int
431 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
432     struct mlx5e_eth_addr_info *ai, int type)
433 {
434 	struct mlx5_flow_spec *spec;
435 	int err = 0;
436 
437 	spec = mlx5_vzalloc(sizeof(*spec));
438 	if (!spec) {
439 		mlx5_en_err(priv->ifp, "alloc failed\n");
440 		err = -ENOMEM;
441 		goto add_eth_addr_rule_out;
442 	}
443 	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, spec);
444 
445 add_eth_addr_rule_out:
446 	kvfree(spec);
447 
448 	return (err);
449 }
450 
451 static void
452 mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
453 {
454 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
455 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
456 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
457 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
458 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
459 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
460 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
461 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
462 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
463 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
464 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
465 }
466 
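/*
 * Install the rules of the main VXLAN table, classifying decapsulated
 * packets by their inner headers and steering them to the inner TIRs.
 * The final rule, added with a NULL spec, acts as a catch-all.
 */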
467 static int
468 mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv,
469 			       struct mlx5_flow_spec *spec)
470 {
471 	struct mlx5_flow_destination dest = {};
472 	struct mlx5_flow_handle **rule_p;
473 	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
474 	u32 *tirn = priv->tirn_inner_vxlan;
475 	struct mlx5_flow_act flow_act = {
476 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
477 	};
478 	int err = 0;
479 	u8 *mc;
480 	u8 *mv;
481 
482 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
483 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
484 
485 	mc = (u8 *)spec->match_criteria;
486 	mv = (u8 *)spec->match_value;
487 
488 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
489 
490 	spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
491 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
492 
493 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
494 	dest.tir_num = tirn[MLX5E_TT_IPV4];
495 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
496 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
497 	if (IS_ERR_OR_NULL(*rule_p))
498 		goto err_del_ai;
499 
500 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
501 	dest.tir_num = tirn[MLX5E_TT_IPV6];
502 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
503 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
504 	if (IS_ERR_OR_NULL(*rule_p))
505 		goto err_del_ai;
506 
507 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
508 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);
509 
510 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
511 	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
512 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
513 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
514 	if (IS_ERR_OR_NULL(*rule_p))
515 		goto err_del_ai;
516 
517 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
518 	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
519 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
520 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
521 	if (IS_ERR_OR_NULL(*rule_p))
522 		goto err_del_ai;
523 
524 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);
525 
526 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
527 	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
528 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
529 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
530 	if (IS_ERR_OR_NULL(*rule_p))
531 		goto err_del_ai;
532 
533 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
534 	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
535 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
536 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
537 	if (IS_ERR_OR_NULL(*rule_p))
538 		goto err_del_ai;
539 
540 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);
541 
542 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
543 	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
544 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
545 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
546 	if (IS_ERR_OR_NULL(*rule_p))
547 		goto err_del_ai;
548 
549 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
550 	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
551 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
552 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
553 	if (IS_ERR_OR_NULL(*rule_p))
554 		goto err_del_ai;
555 
556 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);
557 
558 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
559 	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
560 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
561 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
562 	if (IS_ERR_OR_NULL(*rule_p))
563 		goto err_del_ai;
564 
565 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
566 	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
567 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
568 		 ETHERTYPE_IPV6);
569 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
570 	if (IS_ERR_OR_NULL(*rule_p))
571 		goto err_del_ai;
572 
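	/* Catch-all rule: a NULL spec matches any remaining traffic. */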
573 	spec->match_criteria_enable = 0;
574 	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
575 	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
576 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
577 	dest.tir_num = tirn[MLX5E_TT_ANY];
578 	*rule_p = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
579 	if (IS_ERR_OR_NULL(*rule_p))
580 		goto err_del_ai;
581 
582 	return (0);
583 
584 err_del_ai:
585 	err = IS_ERR(*rule_p) ? PTR_ERR(*rule_p) : -ENOMEM;
586 	*rule_p = NULL;
587 	mlx5e_del_main_vxlan_rules(priv);
588 
589 	return (err);
590 }
591 
592 static int
593 mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
594 {
595 	struct mlx5_flow_spec *spec;
596 	int err = 0;
597 
598 	spec = mlx5_vzalloc(sizeof(*spec));
599 	if (!spec) {
600 		mlx5_en_err(priv->ifp, "alloc failed\n");
601 		err = -ENOMEM;
602 		goto add_main_vxlan_rules_out;
603 	}
604 	err = mlx5e_add_main_vxlan_rules_sub(priv, spec);
605 
606 add_main_vxlan_rules_out:
607 	kvfree(spec);
608 
609 	return (err);
610 }
611 
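/*
 * Synchronize the ifnet's active VLAN list into the NIC vport
 * context.  VLANs beyond the device's log_max_vlan_list limit are
 * dropped with a warning.
 */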
612 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
613 {
614 	if_t ifp = priv->ifp;
615 	int max_list_size;
616 	int list_size;
617 	u16 *vlans;
618 	int vlan;
619 	int err;
620 	int i;
621 
622 	list_size = 0;
623 	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
624 		list_size++;
625 
626 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
627 
628 	if (list_size > max_list_size) {
629 		mlx5_en_err(ifp,
630 			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
631 			    list_size, max_list_size);
632 		list_size = max_list_size;
633 	}
634 
635 	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
636 	if (!vlans)
637 		return (-ENOMEM);
638 
639 	i = 0;
640 	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
641 		if (i >= list_size)
642 			break;
643 		vlans[i++] = vlan;
644 	}
645 
646 	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
647 	if (err)
648 		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
649 			   err);
650 
651 	kfree(vlans);
652 	return (err);
653 }
654 
655 enum mlx5e_vlan_rule_type {
656 	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
657 	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
658 	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
659 	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
660 };
661 
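/*
 * Install a rule in the VLAN table forwarding matching packets to the
 * VXLAN classification table.  Depending on the rule type, the rule
 * matches untagged packets, any C-tagged or S-tagged packets, or one
 * specific C-tagged VLAN ID.
 */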
662 static int
663 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
664     enum mlx5e_vlan_rule_type rule_type, u16 vid,
665     struct mlx5_flow_spec *spec)
666 {
667 	struct mlx5_flow_table *ft = priv->fts.vlan.t;
668 	struct mlx5_flow_destination dest = {};
669 	struct mlx5_flow_handle **rule_p;
670 	int err = 0;
671 	struct mlx5_flow_act flow_act = {
672 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
673 	};
674 	u8 *mv;
675 	u8 *mc;
676 
677 	mv = (u8 *)spec->match_value;
678 	mc = (u8 *)spec->match_criteria;
679 
680 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
681 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
682 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
683 	dest.ft = priv->fts.vxlan.t;
684 
685 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
686 
687 	switch (rule_type) {
688 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
689 		rule_p = &priv->vlan.untagged_ft_rule;
690 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
691 		break;
692 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
693 		rule_p = &priv->vlan.any_cvlan_ft_rule;
694 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
695 		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
696 		break;
697 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
698 		rule_p = &priv->vlan.any_svlan_ft_rule;
699 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
700 		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
701 		break;
702 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
703 		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
704 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
705 		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
706 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
707 		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
708 		mlx5e_vport_context_update_vlans(priv);
709 		break;
710 	}
711 
712 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
713 	if (IS_ERR(*rule_p)) {
714 		err = PTR_ERR(*rule_p);
715 		*rule_p = NULL;
716 		mlx5_en_err(priv->ifp, "add rule failed\n");
717 	}
718 
719 	return (err);
720 }
721 
722 static int
723 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
724     enum mlx5e_vlan_rule_type rule_type, u16 vid)
725 {
726 	struct mlx5_flow_spec *spec;
727 	int err = 0;
728 
729 	spec = mlx5_vzalloc(sizeof(*spec));
730 	if (!spec) {
731 		mlx5_en_err(priv->ifp, "alloc failed\n");
732 		err = -ENOMEM;
733 		goto add_vlan_rule_out;
734 	}
735 
736 	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, spec);
737 
738 add_vlan_rule_out:
739 	kvfree(spec);
740 
741 	return (err);
742 }
743 
744 static void
745 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
746     enum mlx5e_vlan_rule_type rule_type, u16 vid)
747 {
748 	switch (rule_type) {
749 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
750 		mlx5_del_flow_rules(&priv->vlan.untagged_ft_rule);
751 		break;
752 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
753 		mlx5_del_flow_rules(&priv->vlan.any_cvlan_ft_rule);
754 		break;
755 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
756 		mlx5_del_flow_rules(&priv->vlan.any_svlan_ft_rule);
757 		break;
758 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
759 		mlx5_del_flow_rules(&priv->vlan.active_vlans_ft_rule[vid]);
760 		mlx5e_vport_context_update_vlans(priv);
761 		break;
762 	default:
763 		break;
764 	}
765 }
766 
767 static void
768 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
769 {
770 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
771 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
772 }
773 
774 static int
775 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
776 {
777 	int err;
778 
779 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
780 	if (err)
781 		return (err);
782 
783 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
784 	if (err)
785 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
786 
787 	return (err);
788 }
789 
790 void
791 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
792 {
793 	if (priv->vlan.filter_disabled) {
794 		priv->vlan.filter_disabled = false;
795 		if (if_getflags(priv->ifp) & IFF_PROMISC)
796 			return;
797 		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
798 			mlx5e_del_any_vid_rules(priv);
799 	}
800 }
801 
802 void
803 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
804 {
805 	if (!priv->vlan.filter_disabled) {
806 		priv->vlan.filter_disabled = true;
807 		if (if_getflags(priv->ifp) & IFF_PROMISC)
808 			return;
809 		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
810 			mlx5e_add_any_vid_rules(priv);
811 	}
812 }
813 
814 void
815 mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
816 {
817 	struct mlx5e_priv *priv = arg;
818 
819 	if (ifp != priv->ifp)
820 		return;
821 
822 	PRIV_LOCK(priv);
823 	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
824 	    test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
825 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
826 	PRIV_UNLOCK(priv);
827 }
828 
829 void
830 mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
831 {
832 	struct mlx5e_priv *priv = arg;
833 
834 	if (ifp != priv->ifp)
835 		return;
836 
837 	PRIV_LOCK(priv);
838 	clear_bit(vid, priv->vlan.active_vlans);
839 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
840 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
841 	PRIV_UNLOCK(priv);
842 }
843 
844 static int
845 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
846 {
847 	int err;
848 	int i;
849 
850 	set_bit(0, priv->vlan.active_vlans);
851 	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
852 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
853 					  i);
854 		if (err)
855 			goto error;
856 	}
857 
858 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
859 	if (err)
860 		goto error;
861 
862 	if (priv->vlan.filter_disabled) {
863 		err = mlx5e_add_any_vid_rules(priv);
864 		if (err)
865 			goto error;
866 	}
867 	return (0);
868 error:
869 	mlx5e_del_all_vlan_rules(priv);
870 	return (err);
871 }
872 
873 static void
874 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
875 {
876 	int i;
877 
878 	if (priv->vlan.filter_disabled)
879 		mlx5e_del_any_vid_rules(priv);
880 
881 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
882 
883 	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
884 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
885 	clear_bit(0, priv->vlan.active_vlans);
886 }
887 
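/* Iterate over all nodes of an address hash, safe against removal. */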
888 #define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
889 	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
890 		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
891 
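/*
 * Apply the pending action of a hash node: install flow rules for a
 * newly added address, or tear down the rules, the MPFS entry (if
 * any) and the node itself for a deleted one.
 */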
892 static void
893 mlx5e_execute_action(struct mlx5e_priv *priv,
894     struct mlx5e_eth_addr_hash_node *hn)
895 {
896 	switch (hn->action) {
897 	case MLX5E_ACTION_ADD:
898 		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
899 		hn->action = MLX5E_ACTION_NONE;
900 		break;
901 
902 	case MLX5E_ACTION_DEL:
903 		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
904 		if (hn->mpfs_index != -1U)
905 			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
906 		mlx5e_del_eth_addr_from_hash(hn);
907 		break;
908 
909 	default:
910 		break;
911 	}
912 }
913 
914 static struct mlx5e_eth_addr_hash_node *
915 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
916 {
917 	struct mlx5e_eth_addr_hash_node *hn;
918 
919 	hn = LIST_FIRST(fh);
920 	if (hn != NULL) {
921 		LIST_REMOVE(hn, hlist);
922 		LIST_INSERT_HEAD(uh, hn, hlist);
923 	}
924 	return (hn);
925 }
926 
927 static struct mlx5e_eth_addr_hash_node *
928 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
929 {
930 	struct mlx5e_eth_addr_hash_node *hn;
931 
932 	hn = LIST_FIRST(fh);
933 	if (hn != NULL)
934 		LIST_REMOVE(hn, hlist);
935 	return (hn);
936 }
937 
938 struct mlx5e_copy_addr_ctx {
939 	struct mlx5e_eth_addr_hash_head *free;
940 	struct mlx5e_eth_addr_hash_head *fill;
941 	bool success;
942 };
943 
944 static u_int
945 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
946 {
947 	struct mlx5e_copy_addr_ctx *ctx = arg;
948 	struct mlx5e_eth_addr_hash_node *hn;
949 
950 	hn = mlx5e_move_hn(ctx->free, ctx->fill);
951 	if (hn == NULL) {
952 		ctx->success = false;
953 		return (0);
954 	}
955 	ether_addr_copy(hn->ai.addr, LLADDR(sdl));
956 
957 	return (1);
958 }
959 
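/*
 * Rebuild the unicast and multicast address hashes from the current
 * ifnet addresses.  Placeholder nodes are preallocated up front; if
 * more addresses show up than were counted, everything is freed and
 * the whole operation is retried.
 */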
960 static void
961 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
962 {
963 	struct mlx5e_copy_addr_ctx ctx;
964 	struct mlx5e_eth_addr_hash_head head_free;
965 	struct mlx5e_eth_addr_hash_head head_uc;
966 	struct mlx5e_eth_addr_hash_head head_mc;
967 	struct mlx5e_eth_addr_hash_node *hn;
968 	if_t ifp = priv->ifp;
969 	size_t x;
970 	size_t num;
971 
972 	PRIV_ASSERT_LOCKED(priv);
973 
974 retry:
975 	LIST_INIT(&head_free);
976 	LIST_INIT(&head_uc);
977 	LIST_INIT(&head_mc);
978 	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);
979 
980 	/* allocate place holders */
981 	for (x = 0; x != num; x++) {
982 		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
983 		hn->action = MLX5E_ACTION_ADD;
984 		hn->mpfs_index = -1U;
985 		LIST_INSERT_HEAD(&head_free, hn, hlist);
986 	}
987 
988 	hn = mlx5e_move_hn(&head_free, &head_uc);
989 	MPASS(hn != NULL);
990 
991 	ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
992 
993 	ctx.free = &head_free;
994 	ctx.fill = &head_uc;
995 	ctx.success = true;
996 	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
997 	if (ctx.success == false)
998 		goto cleanup;
999 
1000 	ctx.fill = &head_mc;
1001 	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
1002 	if (ctx.success == false)
1003 		goto cleanup;
1004 
1005 	/* insert L2 unicast addresses into hash list */
1006 
1007 	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
1008 		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
1009 			continue;
1010 		if (hn->mpfs_index == -1U)
1011 			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
1012 			    hn->ai.addr, 0, 0);
1013 	}
1014 
1015 	/* insert L2 multicast addresses into hash list */
1016 
1017 	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
1018 		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
1019 			continue;
1020 	}
1021 
1022 cleanup:
1023 	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
1024 		free(hn, M_MLX5EN);
1025 	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
1026 		free(hn, M_MLX5EN);
1027 	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
1028 		free(hn, M_MLX5EN);
1029 
1030 	if (ctx.success == false)
1031 		goto retry;
1032 }
1033 
1034 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
1035 				  u8 addr_array[][ETH_ALEN], int size)
1036 {
1037 	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1038 	if_t ifp = priv->ifp;
1039 	struct mlx5e_eth_addr_hash_node *hn;
1040 	struct mlx5e_eth_addr_hash_head *addr_list;
1041 	struct mlx5e_eth_addr_hash_node *tmp;
1042 	int i = 0;
1043 	int hi;
1044 
1045 	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1046 
1047 	if (is_uc) /* Make sure our own address is pushed first */
1048 		ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
1049 	else if (priv->eth_addr.broadcast_enabled)
1050 		ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
1051 
1052 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
1053 		if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
1054 			continue;
1055 		if (i >= size)
1056 			break;
1057 		ether_addr_copy(addr_array[i++], hn->ai.addr);
1058 	}
1059 }
1060 
1061 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
1062 						 int list_type)
1063 {
1064 	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1065 	struct mlx5e_eth_addr_hash_node *hn;
1066 	u8 (*addr_array)[ETH_ALEN] = NULL;
1067 	struct mlx5e_eth_addr_hash_head *addr_list;
1068 	struct mlx5e_eth_addr_hash_node *tmp;
1069 	int max_size;
1070 	int size;
1071 	int err;
1072 	int hi;
1073 
1074 	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
1075 	max_size = is_uc ?
1076 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1077 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
1078 
1079 	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1080 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
1081 		size++;
1082 
1083 	if (size > max_size) {
1084 		mlx5_en_err(priv->ifp,
1085 			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
1086 			    is_uc ? "UC" : "MC", size, max_size);
1087 		size = max_size;
1088 	}
1089 
1090 	if (size) {
1091 		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
1092 		if (!addr_array) {
1093 			err = -ENOMEM;
1094 			goto out;
1095 		}
1096 		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
1097 	}
1098 
1099 	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
1100 out:
1101 	if (err)
1102 		mlx5_en_err(priv->ifp,
1103 			   "Failed to modify vport %s list err(%d)\n",
1104 			   is_uc ? "UC" : "MC", err);
1105 	kfree(addr_array);
1106 }
1107 
1108 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
1109 {
1110 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1111 
1112 	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
1113 	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
1114 	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
1115 				      ea->allmulti_enabled,
1116 				      ea->promisc_enabled);
1117 }
1118 
1119 static void
1120 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
1121 {
1122 	struct mlx5e_eth_addr_hash_node *hn;
1123 	struct mlx5e_eth_addr_hash_node *tmp;
1124 	int i;
1125 
1126 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1127 	    mlx5e_execute_action(priv, hn);
1128 
1129 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1130 	    mlx5e_execute_action(priv, hn);
1131 }
1132 
1133 static void
1134 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
1135 {
1136 	struct mlx5e_eth_addr_hash_node *hn;
1137 	struct mlx5e_eth_addr_hash_node *tmp;
1138 	int i;
1139 
1140 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1141 	    hn->action = MLX5E_ACTION_DEL;
1142 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1143 	    hn->action = MLX5E_ACTION_DEL;
1144 
1145 	if (rx_mode_enable)
1146 		mlx5e_sync_ifp_addr(priv);
1147 
1148 	mlx5e_apply_ifp_addr(priv);
1149 }
1150 
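/*
 * Apply the ifnet RX mode to the flow tables: determine which of the
 * promiscuous, all-multicast and broadcast states changed, install
 * new rules before removing stale ones, and push the resulting state
 * into the NIC vport context.
 */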
1151 static void
1152 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
1153 {
1154 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1155 	if_t ndev = priv->ifp;
1156 	int ndev_flags = if_getflags(ndev);
1157 
1158 	bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
1159 	bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
1160 	bool broadcast_enabled = rx_mode_enable;
1161 
1162 	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
1163 	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
1164 	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
1165 	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
1166 	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
1167 	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
1168 
1169 	/* update broadcast address */
1170 	ether_addr_copy(priv->eth_addr.broadcast.addr,
1171 	    if_getbroadcastaddr(priv->ifp));
1172 
1173 	if (enable_promisc) {
1174 		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
1175 		if (!priv->vlan.filter_disabled)
1176 			mlx5e_add_any_vid_rules(priv);
1177 	}
1178 	if (enable_allmulti)
1179 		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
1180 	if (enable_broadcast)
1181 		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
1182 
1183 	mlx5e_handle_ifp_addr(priv, rx_mode_enable);
1184 
1185 	if (disable_broadcast)
1186 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
1187 	if (disable_allmulti)
1188 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
1189 	if (disable_promisc) {
1190 		if (!priv->vlan.filter_disabled)
1191 			mlx5e_del_any_vid_rules(priv);
1192 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
1193 	}
1194 
1195 	ea->promisc_enabled = promisc_enabled;
1196 	ea->allmulti_enabled = allmulti_enabled;
1197 	ea->broadcast_enabled = broadcast_enabled;
1198 
1199 	mlx5e_vport_context_update(priv);
1200 }
1201 
1202 void
1203 mlx5e_set_rx_mode_work(struct work_struct *work)
1204 {
1205 	struct mlx5e_priv *priv =
1206 	    container_of(work, struct mlx5e_priv, set_rx_mode_work);
1207 
1208 	PRIV_LOCK(priv);
1209 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1210 		mlx5e_set_rx_mode_core(priv, true);
1211 	PRIV_UNLOCK(priv);
1212 }
1213 
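/* Destroy the flow groups of a table in reverse creation order. */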
1214 static void
1215 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
1216 {
1217 	int i;
1218 
1219 	for (i = ft->num_groups - 1; i >= 0; i--) {
1220 		if (!IS_ERR_OR_NULL(ft->g[i]))
1221 			mlx5_destroy_flow_group(ft->g[i]);
1222 		ft->g[i] = NULL;
1223 	}
1224 	ft->num_groups = 0;
1225 }
1226 
1227 static void
1228 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
1229 {
1230 	mlx5e_destroy_groups(ft);
1231 	kfree(ft->g);
1232 	mlx5_destroy_flow_table(ft->t);
1233 	ft->t = NULL;
1234 }
1235 
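/*
 * Sizes of the flow groups in the main table.  Each group holds rules
 * sharing one match criteria mask, and the order of the groups below
 * must match the creation order in mlx5e_create_main_groups_sub().
 */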
1236 #define MLX5E_NUM_MAIN_GROUPS	10
1237 #define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
1238 #define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
1239 #define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
1240 #define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
1241 #define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
1242 #define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
1243 #define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
1244 #define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
1245 #define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
1246 #define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
1247 #define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
1248 				 MLX5E_MAIN_GROUP1_SIZE +\
1249 				 MLX5E_MAIN_GROUP2_SIZE +\
1250 				 MLX5E_MAIN_GROUP3_SIZE +\
1251 				 MLX5E_MAIN_GROUP4_SIZE +\
1252 				 MLX5E_MAIN_GROUP5_SIZE +\
1253 				 MLX5E_MAIN_GROUP6_SIZE +\
1254 				 MLX5E_MAIN_GROUP7_SIZE +\
1255 				 MLX5E_MAIN_GROUP8_SIZE +\
1256 				 MLX5E_MAIN_GROUP9_SIZE +\
1257 				 0)
1258 
1259 static int
1260 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1261 				      int inlen)
1262 {
1263 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1264 	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1265 				match_criteria.outer_headers.dmac_47_16);
1266 	int err;
1267 	int ix = 0;
1268 
1269 	/* Tunnel rules need to be first in this list of groups */
1270 
1271 	/* Start tunnel rules */
1272 	memset(in, 0, inlen);
1273 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1274 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1275 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1276 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1277 	MLX5_SET_CFG(in, start_flow_index, ix);
1278 	ix += MLX5E_MAIN_GROUP0_SIZE;
1279 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1280 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1281 	if (IS_ERR(ft->g[ft->num_groups]))
1282 		goto err_destroy_groups;
1283 	ft->num_groups++;
1284 	/* End Tunnel Rules */
1285 
1286 	memset(in, 0, inlen);
1287 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1288 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1289 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1290 	MLX5_SET_CFG(in, start_flow_index, ix);
1291 	ix += MLX5E_MAIN_GROUP1_SIZE;
1292 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1293 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1294 	if (IS_ERR(ft->g[ft->num_groups]))
1295 		goto err_destroy_groups;
1296 	ft->num_groups++;
1297 
1298 	memset(in, 0, inlen);
1299 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1300 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1301 	MLX5_SET_CFG(in, start_flow_index, ix);
1302 	ix += MLX5E_MAIN_GROUP2_SIZE;
1303 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1304 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1305 	if (IS_ERR(ft->g[ft->num_groups]))
1306 		goto err_destroy_groups;
1307 	ft->num_groups++;
1308 
1309 	memset(in, 0, inlen);
1310 	MLX5_SET_CFG(in, start_flow_index, ix);
1311 	ix += MLX5E_MAIN_GROUP3_SIZE;
1312 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1313 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1314 	if (IS_ERR(ft->g[ft->num_groups]))
1315 		goto err_destroy_groups;
1316 	ft->num_groups++;
1317 
1318 	memset(in, 0, inlen);
1319 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1320 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1321 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1322 	memset(dmac, 0xff, ETH_ALEN);
1323 	MLX5_SET_CFG(in, start_flow_index, ix);
1324 	ix += MLX5E_MAIN_GROUP4_SIZE;
1325 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1326 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1327 	if (IS_ERR(ft->g[ft->num_groups]))
1328 		goto err_destroy_groups;
1329 	ft->num_groups++;
1330 
1331 	memset(in, 0, inlen);
1332 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1333 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1334 	memset(dmac, 0xff, ETH_ALEN);
1335 	MLX5_SET_CFG(in, start_flow_index, ix);
1336 	ix += MLX5E_MAIN_GROUP5_SIZE;
1337 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1338 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1339 	if (IS_ERR(ft->g[ft->num_groups]))
1340 		goto err_destroy_groups;
1341 	ft->num_groups++;
1342 
1343 	memset(in, 0, inlen);
1344 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1345 	memset(dmac, 0xff, ETH_ALEN);
1346 	MLX5_SET_CFG(in, start_flow_index, ix);
1347 	ix += MLX5E_MAIN_GROUP6_SIZE;
1348 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1349 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1350 	if (IS_ERR(ft->g[ft->num_groups]))
1351 		goto err_destroy_groups;
1352 	ft->num_groups++;
1353 
1354 	memset(in, 0, inlen);
1355 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1356 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1357 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1358 	dmac[0] = 0x01;
1359 	MLX5_SET_CFG(in, start_flow_index, ix);
1360 	ix += MLX5E_MAIN_GROUP7_SIZE;
1361 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1362 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1363 	if (IS_ERR(ft->g[ft->num_groups]))
1364 		goto err_destroy_groups;
1365 	ft->num_groups++;
1366 
1367 	memset(in, 0, inlen);
1368 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1369 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1370 	dmac[0] = 0x01;
1371 	MLX5_SET_CFG(in, start_flow_index, ix);
1372 	ix += MLX5E_MAIN_GROUP8_SIZE;
1373 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1374 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1375 	if (IS_ERR(ft->g[ft->num_groups]))
1376 		goto err_destroy_groups;
1377 	ft->num_groups++;
1378 
1379 	memset(in, 0, inlen);
1380 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1381 	dmac[0] = 0x01;
1382 	MLX5_SET_CFG(in, start_flow_index, ix);
1383 	ix += MLX5E_MAIN_GROUP9_SIZE;
1384 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1385 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1386 	if (IS_ERR(ft->g[ft->num_groups]))
1387 		goto err_destroy_groups;
1388 	ft->num_groups++;
1389 
1390 	return (0);
1391 
1392 err_destroy_groups:
1393 	err = PTR_ERR(ft->g[ft->num_groups]);
1394 	ft->g[ft->num_groups] = NULL;
1395 	mlx5e_destroy_groups(ft);
1396 
1397 	return (err);
1398 }
1399 
1400 static int
1401 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1402 {
1403 	u32 *in;
1404 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1405 	int err;
1406 
1407 	in = mlx5_vzalloc(inlen);
1408 	if (!in)
1409 		return (-ENOMEM);
1410 
1411 	err = mlx5e_create_main_groups_sub(ft, in, inlen);
1412 
1413 	kvfree(in);
1414 	return (err);
1415 }
1416 
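/*
 * Flow group sizes of the main VXLAN table: inner ethertype plus
 * protocol rules, inner ethertype-only rules, and the catch-all.
 */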
1417 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
1418 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
1419 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
1420 static int
1421 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1422     int inlen)
1423 {
1424 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1425 	int err;
1426 	int ix = 0;
1427 
1428 	memset(in, 0, inlen);
1429 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1430 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1431 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1432 	MLX5_SET_CFG(in, start_flow_index, ix);
1433 	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1434 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1435 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1436 	if (IS_ERR(ft->g[ft->num_groups]))
1437 		goto err_destroy_groups;
1438 	ft->num_groups++;
1439 
1440 	memset(in, 0, inlen);
1441 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1442 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1443 	MLX5_SET_CFG(in, start_flow_index, ix);
1444 	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1445 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1446 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1447 	if (IS_ERR(ft->g[ft->num_groups]))
1448 		goto err_destroy_groups;
1449 	ft->num_groups++;
1450 
1451 	memset(in, 0, inlen);
1452 	MLX5_SET_CFG(in, start_flow_index, ix);
1453 	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1454 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1455 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1456 	if (IS_ERR(ft->g[ft->num_groups]))
1457 		goto err_destroy_groups;
1458 	ft->num_groups++;
1459 
1460 	return (0);
1461 
1462 err_destroy_groups:
1463 	err = PTR_ERR(ft->g[ft->num_groups]);
1464 	ft->g[ft->num_groups] = NULL;
1465 	mlx5e_destroy_groups(ft);
1466 
1467 	return (err);
1468 }
1469 
1470 static int
1471 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1472 {
1473 	u32 *in;
1474 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1475 	int err;
1476 
1477 	in = mlx5_vzalloc(inlen);
1478 	if (!in)
1479 		return (-ENOMEM);
1480 
1481 	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
1482 
1483 	kvfree(in);
1484 	return (err);
1485 }
1486 
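/*
 * Create the main flow table, or the main VXLAN flow table when
 * inner_vxlan is set.  The flow table level is chosen higher when
 * IPSEC offload is enabled, so the tables do not collide with the
 * IPSEC flow tables.
 */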
1488 static int
1489 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1490 {
1491 	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1492 	    &priv->fts.main;
1493 	struct mlx5_flow_table_attr ft_attr = {};
1494 	int err;
1495 
1496 	ft->num_groups = 0;
1497 	ft_attr.max_fte = MLX5E_MAIN_TABLE_SIZE;
1498 	if (priv->ipsec)
1499 		ft_attr.level = inner_vxlan ? 10 : 12;
1500 	else
1501 		ft_attr.level = inner_vxlan ? 2 : 4;
1502 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
1503 
1504 	if (IS_ERR(ft->t)) {
1505 		err = PTR_ERR(ft->t);
1506 		ft->t = NULL;
1507 		return (err);
1508 	}
1509 	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1510 	if (!ft->g) {
1511 		err = -ENOMEM;
1512 		goto err_destroy_main_flow_table;
1513 	}
1514 
1515 	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1516 	    mlx5e_create_main_groups(ft);
1517 	if (err)
1518 		goto err_free_g;
1519 	return (0);
1520 
1521 err_free_g:
1522 	kfree(ft->g);
1523 
1524 err_destroy_main_flow_table:
1525 	mlx5_destroy_flow_table(ft->t);
1526 	ft->t = NULL;
1527 
1528 	return (err);
1529 }
1530 
1531 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1532 {
1533 	mlx5e_destroy_flow_table(&priv->fts.main);
1534 }
1535 
1536 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1537 {
1538 	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
1539 }
1540 
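/*
 * VLAN table groups: one large group for per-VID C-TAG rules, a
 * two-entry group for the untagged and any-C-TAG rules, and a single
 * entry for the any-S-TAG rule.
 */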
1541 #define MLX5E_NUM_VLAN_GROUPS	3
1542 #define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
1543 #define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
1544 #define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
1545 #define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
1546 				 MLX5E_VLAN_GROUP1_SIZE +\
1547 				 MLX5E_VLAN_GROUP2_SIZE +\
1548 				 0)
1549 
1550 static int
1551 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1552 				      int inlen)
1553 {
1554 	int err;
1555 	int ix = 0;
1556 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1557 
1558 	memset(in, 0, inlen);
1559 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1560 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1561 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1562 	MLX5_SET_CFG(in, start_flow_index, ix);
1563 	ix += MLX5E_VLAN_GROUP0_SIZE;
1564 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1565 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1566 	if (IS_ERR(ft->g[ft->num_groups]))
1567 		goto err_destroy_groups;
1568 	ft->num_groups++;
1569 
1570 	memset(in, 0, inlen);
1571 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1572 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1573 	MLX5_SET_CFG(in, start_flow_index, ix);
1574 	ix += MLX5E_VLAN_GROUP1_SIZE;
1575 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1576 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1577 	if (IS_ERR(ft->g[ft->num_groups]))
1578 		goto err_destroy_groups;
1579 	ft->num_groups++;
1580 
1581 	memset(in, 0, inlen);
1582 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1583 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1584 	MLX5_SET_CFG(in, start_flow_index, ix);
1585 	ix += MLX5E_VLAN_GROUP2_SIZE;
1586 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1587 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1588 	if (IS_ERR(ft->g[ft->num_groups]))
1589 		goto err_destroy_groups;
1590 	ft->num_groups++;
1591 
1592 	return (0);
1593 
1594 err_destroy_groups:
1595 	err = PTR_ERR(ft->g[ft->num_groups]);
1596 	ft->g[ft->num_groups] = NULL;
1597 	mlx5e_destroy_groups(ft);
1598 
1599 	return (err);
1600 }
1601 
1602 static int
1603 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1604 {
1605 	u32 *in;
1606 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1607 	int err;
1608 
1609 	in = mlx5_vzalloc(inlen);
1610 	if (!in)
1611 		return (-ENOMEM);
1612 
1613 	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
1614 
1615 	kvfree(in);
1616 	return (err);
1617 }
1618 
1619 static int
1620 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1621 {
1622 	struct mlx5e_flow_table *ft = &priv->fts.vlan;
1623 	struct mlx5_flow_table_attr ft_attr = {};
1624 	int err;
1625 
1626 	ft->num_groups = 0;
1627 	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1628 	ft_attr.level = (priv->ipsec) ? 8 : 0;
1629 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
1630 
1631 	if (IS_ERR(ft->t)) {
1632 		err = PTR_ERR(ft->t);
1633 		ft->t = NULL;
1634 		return (err);
1635 	}
1636 	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1637 	if (!ft->g) {
1638 		err = -ENOMEM;
1639 		goto err_destroy_vlan_flow_table;
1640 	}
1641 
1642 	err = mlx5e_create_vlan_groups(ft);
1643 	if (err)
1644 		goto err_free_g;
1645 
1646 	return (0);
1647 
1648 err_free_g:
1649 	kfree(ft->g);
1650 
1651 err_destroy_vlan_flow_table:
1652 	mlx5_destroy_flow_table(ft->t);
1653 	ft->t = NULL;
1654 
1655 	return (err);
1656 }
1657 
1658 static void
1659 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1660 {
1661 	mlx5e_destroy_flow_table(&priv->fts.vlan);
1662 }
1663 
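/*
 * Install a rule in the VXLAN table matching a specific outer
 * ethertype and UDP destination port, steering packets of that tunnel
 * to the main VXLAN table for inner header classification.
 */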
1664 static int
1665 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
1666     struct mlx5e_vxlan_db_el *el)
1667 {
1668 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1669 	struct mlx5_flow_destination dest = {};
1670 	struct mlx5_flow_handle **rule_p;
1671 	int err = 0;
1672 	struct mlx5_flow_act flow_act = {
1673 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
1674 	};
1675 	u8 *mc;
1676 	u8 *mv;
1677 
1678 	mv = (u8 *)spec->match_value;
1679 	mc = (u8 *)spec->match_criteria;
1680 
1681 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
1682 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
1683 
1684 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1685 	dest.ft = priv->fts.main_vxlan.t;
1686 
1687 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1688 	rule_p = &el->vxlan_ft_rule;
1689 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1690 	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1691 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1692 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1693 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1694 	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1695 
1696 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1697 
1698 	if (IS_ERR(*rule_p)) {
1699 		err = PTR_ERR(*rule_p);
1700 		*rule_p = NULL;
1701 		mlx5_en_err(priv->ifp, "add rule failed\n");
1702 	}
1703 
1704 	return (err);
1705 }
1706 
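/*
 * The VXLAN database tracks every (ethertype, UDP port) pair
 * registered by the stack.  Entries are reference counted, so
 * repeated registrations of the same port install only one flow rule.
 */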
1707 static struct mlx5e_vxlan_db_el *
1708 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1709 {
1710 	struct mlx5e_vxlan_db_el *el;
1711 
1712 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1713 		if (el->proto == proto && el->port == port)
1714 			return (el);
1715 	}
1716 	return (NULL);
1717 }
1718 
1719 static struct mlx5e_vxlan_db_el *
1720 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1721 {
1722 	struct mlx5e_vxlan_db_el *el;
1723 
	el = mlx5_vzalloc(sizeof(*el));
	if (el == NULL)
		return (NULL);
	el->refcount = 1;
	el->proto = proto;
	el->port = port;
	el->vxlan_ft_rule = NULL;
	return (el);
1730 }
1731 
1732 static int
1733 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1734 {
1735 	switch (family) {
1736 	case AF_INET:
1737 		*proto = ETHERTYPE_IP;
1738 		return (0);
1739 	case AF_INET6:
1740 		*proto = ETHERTYPE_IPV6;
1741 		return (0);
1742 	default:
1743 		return (-EINVAL);
1744 	}
1745 }
1746 
1747 static int
1748 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1749     struct mlx5e_vxlan_db_el *el)
1750 {
1751 	struct mlx5_flow_spec *spec;
1752 	int err;
1753 
1754 	spec = mlx5_vzalloc(sizeof(*spec));
1755 	if (!spec) {
1756 		mlx5_en_err(priv->ifp, "alloc failed\n");
1757 		err = -ENOMEM;
1758 		goto add_vxlan_rule_out;
1759 	}
1760 
1761 	err = mlx5e_add_vxlan_rule_sub(priv, spec, el);
1762 
1763 add_vxlan_rule_out:
1764 	kvfree(spec);
1765 
1766 	return (err);
1767 }
1768 
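/*
 * Take a reference on the VXLAN database entry for the given address
 * family and UDP port, creating the entry on first use.  The steering
 * rule is only installed while IFCAP_VXLAN_HWCSUM is enabled;
 * otherwise the entry is kept with el->installed == false so that
 * mlx5e_add_all_vxlan_rules() can install the rule later.
 */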
1769 static int
1770 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1771 {
1772 	struct mlx5e_vxlan_db_el *el;
1773 	u_int proto;
1774 	int err;
1775 
1776 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1777 	if (err != 0)
1778 		return (err);
1779 
1780 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1781 	if (el != NULL) {
1782 		el->refcount++;
1783 		if (el->installed)
1784 			return (0);
		/*
		 * The entry is already in the database but its rule was
		 * never installed; try to install it now instead of
		 * allocating a duplicate entry.
		 */
		if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
			err = mlx5e_add_vxlan_rule_from_db(priv, el);
			if (err == 0)
				el->installed = true;
			else
				el->refcount--;
		}
		return (err);
1785 	}
1786 	el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
	if (el == NULL)
		return (-ENOMEM);
1787 
1788 	if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
1789 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1790 		if (err == 0)
1791 			el->installed = true;
1792 	}
1793 	if (err == 0)
1794 		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
1795 	else
1796 		kvfree(el);
1797 
1798 	return (err);
1799 }
1800 
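/*
 * Install the VxLAN table catch-all rule.  The match specification is
 * left empty, so any packet not claimed by a per-port rule above is
 * forwarded to the regular main flow table.
 */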
1801 static int
1802 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv,
1803 				  struct mlx5_flow_spec *spec)
1804 {
1805 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1806 	struct mlx5_flow_destination dest = {};
1807 	struct mlx5_flow_handle **rule_p;
1808 	int err = 0;
1809 	struct mlx5_flow_act flow_act = {
1810 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
1811 	};
1812 
1813 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
1814 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
1815 
1816 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1817 	dest.ft = priv->fts.main.t;
1818 
1819 	rule_p = &priv->fts.vxlan_catchall_ft_rule;
1820 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1821 
1822 	if (IS_ERR(*rule_p)) {
1823 		err = PTR_ERR(*rule_p);
1824 		*rule_p = NULL;
1825 		mlx5_en_err(priv->ifp, "add rule failed\n");
1826 	}
1827 
1828 	return (err);
1829 }
1830 
1832 static int
1833 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1834 {
1835 	struct mlx5_flow_spec *spec;
1836 	int err;
1837 
1838 	spec = mlx5_vzalloc(sizeof(*spec));
1839 	if (!spec) {
1840 		mlx5_en_err(priv->ifp, "alloc failed\n");
1841 		err = -ENOMEM;
1842 		goto add_vxlan_rule_out;
1843 	}
1844 
1845 	err = mlx5e_add_vxlan_catchall_rule_sub(priv, spec);
1846 
1847 add_vxlan_rule_out:
1848 	kvfree(spec);
1849 
1850 	return (err);
1851 }
1852 
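/* (Re-)install the steering rules for all VXLAN database entries. */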
1853 int
1854 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1855 {
1856 	struct mlx5e_vxlan_db_el *el;
1857 	int err;
1858 
1859 	err = 0;
1860 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1861 		if (el->installed)
1862 			continue;
1863 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1864 		if (err != 0)
1865 			break;
1866 		el->installed = true;
1867 	}
1868 
1869 	return (err);
1870 }
1871 
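/*
 * Drop one reference on a VXLAN database entry; on the last reference
 * the steering rule is removed and the entry is freed.
 */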
1872 static int
1873 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1874 {
1875 	struct mlx5e_vxlan_db_el *el;
1876 	u_int proto;
1877 	int err;
1878 
1879 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1880 	if (err != 0)
1881 		return (err);
1882 
1883 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1884 	if (el == NULL)
1885 		return (0);
1886 	if (el->refcount > 1) {
1887 		el->refcount--;
1888 		return (0);
1889 	}
1890 
1891 	if (el->installed)
1892 		mlx5_del_flow_rules(&el->vxlan_ft_rule);
1893 	TAILQ_REMOVE(&priv->vxlan.head, el, link);
1894 	kvfree(el);
1895 	return (0);
1896 }
1897 
1898 void
1899 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1900 {
1901 	struct mlx5e_vxlan_db_el *el;
1902 
1903 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1904 		if (!el->installed)
1905 			continue;
1906 		mlx5_del_flow_rules(&el->vxlan_ft_rule);
1907 		el->installed = false;
1908 	}
1909 }
1910 
1911 static void
1912 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1913 {
1914 	mlx5_del_flow_rules(&priv->fts.vxlan_catchall_ft_rule);
1915 }
1916 
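/*
 * Callbacks invoked when the stack starts or stops listening on a
 * VXLAN UDP port.  Flow rules are only touched after the tables have
 * been populated, as indicated by MLX5E_STATE_FLOW_RULES_READY.
 */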
1917 void
1918 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1919     u_int port)
1920 {
1921 	struct mlx5e_priv *priv = arg;
1922 	int err;
1923 
1924 	PRIV_LOCK(priv);
1925 	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1926 	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1927 		mlx5e_add_vxlan_rule(priv, family, port);
1928 	PRIV_UNLOCK(priv);
1929 }
1930 
1931 void
1932 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1933     u_int port)
1934 {
1935 	struct mlx5e_priv *priv = arg;
1936 
1937 	PRIV_LOCK(priv);
1938 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1939 		mlx5e_del_vxlan_rule(priv, family, port);
1940 	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
1941 	PRIV_UNLOCK(priv);
1942 }
1943 
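/*
 * VxLAN flow table layout: group 0 holds the per-port match rules and
 * group 1 holds the single catch-all rule.  The group 0 size of
 * BIT(3) looks like an arbitrary upper bound on the number of VXLAN
 * ports (see the XXXKIB marker).
 */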
1944 #define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
1945 #define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
1946 #define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
1947 #define	MLX5E_VXLAN_TABLE_SIZE	\
1948     (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
1949 
1950 static int
1951 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1952 				      int inlen)
1953 {
1954 	int err;
1955 	int ix = 0;
1956 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1957 
1958 	memset(in, 0, inlen);
1959 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1960 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1961 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1962 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1963 	MLX5_SET_CFG(in, start_flow_index, ix);
1964 	ix += MLX5E_VXLAN_GROUP0_SIZE;
1965 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1966 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1967 	if (IS_ERR(ft->g[ft->num_groups]))
1968 		goto err_destroy_groups;
1969 	ft->num_groups++;
1970 
1971 	memset(in, 0, inlen);
1972 	MLX5_SET_CFG(in, start_flow_index, ix);
1973 	ix += MLX5E_VXLAN_GROUP1_SIZE;
1974 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1975 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1976 	if (IS_ERR(ft->g[ft->num_groups]))
1977 		goto err_destroy_groups;
1978 	ft->num_groups++;
1979 
1980 	return (0);
1981 
1982 err_destroy_groups:
1983 	err = PTR_ERR(ft->g[ft->num_groups]);
1984 	ft->g[ft->num_groups] = NULL;
1985 	mlx5e_destroy_groups(ft);
1986 
1987 	return (err);
1988 }
1989 
1990 static int
1991 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
1992 {
1993 	u32 *in;
1994 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1995 	int err;
1996 
1997 	in = mlx5_vzalloc(inlen);
1998 	if (!in)
1999 		return (-ENOMEM);
2000 
2001 	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
2002 
2003 	kvfree(in);
2004 	return (err);
2005 }
2006 
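/*
 * Create the VxLAN flow table.  When IPSec RX offload is active the
 * table is placed at a higher level, apparently to leave room for the
 * IPSec RX tables created before it.
 */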
2007 static int
2008 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2009 {
2010 	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2011 	struct mlx5_flow_table_attr ft_attr = {};
2012 	int err;
2013 
2014 	ft->num_groups = 0;
2015 	ft_attr.max_fte = MLX5E_VXLAN_TABLE_SIZE;
2016 	ft_attr.level = (priv->ipsec) ? 9 : 1;
2017 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
2018 
2019 	if (IS_ERR(ft->t)) {
2020 		err = PTR_ERR(ft->t);
2021 		ft->t = NULL;
2022 		return (err);
2023 	}
2024 	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2025 	if (!ft->g) {
2026 		err = -ENOMEM;
2027 		goto err_destroy_vxlan_flow_table;
2028 	}
2029 
2030 	err = mlx5e_create_vxlan_groups(ft);
2031 	if (err)
2032 		goto err_free_g;
2033 
2034 	TAILQ_INIT(&priv->vxlan.head);
2035 	return (0);
2036 
2037 err_free_g:
2038 	kfree(ft->g);
2039 
2040 err_destroy_vxlan_flow_table:
2041 	mlx5_destroy_flow_table(ft->t);
2042 	ft->t = NULL;
2043 
2044 	return (err);
2045 }
2046 
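/*
 * Inner RSS flow table layout: group 0 matches on inner ethertype and
 * IP protocol, group 1 on inner ethertype only, and group 2 is the
 * catch-all.
 */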
2047 #define MLX5E_NUM_INNER_RSS_GROUPS	3
2048 #define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
2049 #define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
2050 #define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
2051 #define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
2052 					 MLX5E_INNER_RSS_GROUP1_SIZE +\
2053 					 MLX5E_INNER_RSS_GROUP2_SIZE +\
2054 					 0)
2055 
2056 static int
2057 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2058 					   int inlen)
2059 {
2060 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2061 	int err;
2062 	int ix = 0;
2063 
2064 	memset(in, 0, inlen);
2065 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2066 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2067 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2068 	MLX5_SET_CFG(in, start_flow_index, ix);
2069 	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2070 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2071 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2072 	if (IS_ERR(ft->g[ft->num_groups]))
2073 		goto err_destroy_groups;
2074 	ft->num_groups++;
2075 
2076 	memset(in, 0, inlen);
2077 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2078 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2079 	MLX5_SET_CFG(in, start_flow_index, ix);
2080 	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2081 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2082 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2083 	if (IS_ERR(ft->g[ft->num_groups]))
2084 		goto err_destroy_groups;
2085 	ft->num_groups++;
2086 
2087 	memset(in, 0, inlen);
2088 	MLX5_SET_CFG(in, start_flow_index, ix);
2089 	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2090 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2091 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2092 	if (IS_ERR(ft->g[ft->num_groups]))
2093 		goto err_destroy_groups;
2094 	ft->num_groups++;
2095 
2096 	return (0);
2097 
2098 err_destroy_groups:
2099 	err = PTR_ERR(ft->g[ft->num_groups]);
2100 	ft->g[ft->num_groups] = NULL;
2101 	mlx5e_destroy_groups(ft);
2102 
2103 	return (err);
2104 }
2105 
2106 static int
2107 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2108 {
2109 	u32 *in;
2110 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2111 	int err;
2112 
2113 	in = mlx5_vzalloc(inlen);
2114 	if (!in)
2115 		return (-ENOMEM);
2116 
2117 	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
2118 
2119 	kvfree(in);
2120 	return (err);
2121 }
2122 
2123 static int
2124 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2125 {
2126 	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2127 	struct mlx5_flow_table_attr ft_attr = {};
2128 	int err;
2129 
2130 	ft->num_groups = 0;
2131 	ft_attr.max_fte = MLX5E_INNER_RSS_TABLE_SIZE;
2132 	ft_attr.level = (priv->ipsec) ? 11 : 3;
2133 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
2134 
2135 	if (IS_ERR(ft->t)) {
2136 		err = PTR_ERR(ft->t);
2137 		ft->t = NULL;
2138 		return (err);
2139 	}
2140 	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2141 			GFP_KERNEL);
2142 	if (!ft->g) {
2143 		err = -ENOMEM;
2144 		goto err_destroy_inner_rss_flow_table;
2145 	}
2146 
2147 	err = mlx5e_create_inner_rss_groups(ft);
2148 	if (err)
2149 		goto err_free_g;
2150 
2151 	return (0);
2152 
2153 err_free_g:
2154 	kfree(ft->g);
2155 
2156 err_destroy_inner_rss_flow_table:
2157 	mlx5_destroy_flow_table(ft->t);
2158 	ft->t = NULL;
2159 
2160 	return (err);
2161 }
2162 
2163 static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2164 {
2165 	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
2166 }
2167 
2168 static void
2169 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2170 {
2171 	mlx5e_destroy_flow_table(&priv->fts.vxlan);
2172 }
2173 
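/*
 * Create all RX flow tables and the static catch-all rules.  The
 * error unwind below tears the tables down in the reverse order of
 * creation; mlx5e_close_flow_tables() mirrors the same order.
 */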
2174 int
2175 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2176 {
2177 	int err;
2178 
2179 	/* setup namespace pointer */
2180 	priv->fts.ns = mlx5_get_flow_namespace(
2181 	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2182 
2183 	err = mlx5e_accel_ipsec_fs_rx_tables_create(priv);
2184 	if (err)
2185 		return (err);
2186 
2187 	err = mlx5e_create_vlan_flow_table(priv);
2188 	if (err)
2189 		goto err_destroy_ipsec_flow_table;
2190 
2191 	err = mlx5e_create_vxlan_flow_table(priv);
2192 	if (err)
2193 		goto err_destroy_vlan_flow_table;
2194 
2195 	err = mlx5e_create_main_flow_table(priv, true);
2196 	if (err)
2197 		goto err_destroy_vxlan_flow_table;
2198 
2199 	err = mlx5e_create_inner_rss_flow_table(priv);
2200 	if (err)
2201 		goto err_destroy_main_flow_table_true;
2202 
2203 	err = mlx5e_create_main_flow_table(priv, false);
2204 	if (err)
2205 		goto err_destroy_inner_rss_flow_table;
2206 
2207 	err = mlx5e_add_vxlan_catchall_rule(priv);
2208 	if (err)
2209 		goto err_destroy_main_flow_table_false;
2210 
2211 	err = mlx5e_accel_ipsec_fs_rx_catchall_rules(priv);
2212 	if (err)
2213 		goto err_destroy_vxlan_catchall_rule;
2214 
2215 	err = mlx5e_accel_fs_tcp_create(priv);
2216 	if (err)
2217 		goto err_destroy_ipsec_catchall_rules;
2218 
2219 	return (0);
2220 
2221 err_destroy_ipsec_catchall_rules:
2222 	mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
2223 err_destroy_vxlan_catchall_rule:
2224 	mlx5e_del_vxlan_catchall_rule(priv);
2225 err_destroy_main_flow_table_false:
2226 	mlx5e_destroy_main_flow_table(priv);
2227 err_destroy_inner_rss_flow_table:
2228 	mlx5e_destroy_inner_rss_flow_table(priv);
2229 err_destroy_main_flow_table_true:
2230 	mlx5e_destroy_main_vxlan_flow_table(priv);
2231 err_destroy_vxlan_flow_table:
2232 	mlx5e_destroy_vxlan_flow_table(priv);
2233 err_destroy_vlan_flow_table:
2234 	mlx5e_destroy_vlan_flow_table(priv);
2235 err_destroy_ipsec_flow_table:
2236 	mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
2237 
2238 	return (err);
2239 }
2240 
2241 void
2242 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2243 {
2244 	mlx5e_accel_fs_tcp_destroy(priv);
2245 	mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
2246 	mlx5e_del_vxlan_catchall_rule(priv);
2247 	mlx5e_destroy_main_flow_table(priv);
2248 	mlx5e_destroy_inner_rss_flow_table(priv);
2249 	mlx5e_destroy_main_vxlan_flow_table(priv);
2250 	mlx5e_destroy_vxlan_flow_table(priv);
2251 	mlx5e_destroy_vlan_flow_table(priv);
2252 	mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
2253 }
2254 
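/*
 * Install the dynamic rules (VLAN, main VxLAN and per-port VXLAN
 * rules), re-apply the RX mode and mark the flow rules ready so that
 * the VXLAN start/stop callbacks operate on them.
 */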
2255 int
2256 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2257 {
2258 	int err;
2259 
2260 	err = mlx5e_add_all_vlan_rules(priv);
2261 	if (err)
2262 		return (err);
2263 
2264 	err = mlx5e_add_main_vxlan_rules(priv);
2265 	if (err)
2266 		goto err_del_all_vlan_rules;
2267 
2268 	err = mlx5e_add_all_vxlan_rules(priv);
2269 	if (err)
2270 		goto err_del_main_vxlan_rules;
2271 
2272 	mlx5e_set_rx_mode_core(priv, true);
2273 
2274 	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2275 
2276 	return (0);
2277 
2278 err_del_main_vxlan_rules:
2279 	mlx5e_del_main_vxlan_rules(priv);
2280 
2281 err_del_all_vlan_rules:
2282 	mlx5e_del_all_vlan_rules(priv);
2283 
2284 	return (err);
2285 }
2286 
2287 void
2288 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2289 {
2290 	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2291 
2292 	mlx5e_set_rx_mode_core(priv, false);
2293 	mlx5e_del_all_vxlan_rules(priv);
2294 	mlx5e_del_main_vxlan_rules(priv);
2295 	mlx5e_del_all_vlan_rules(priv);
2296 }
2297