/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

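/* Hash an Ethernet MAC address into the address table by its last byte. */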
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

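/*
 * Insert "hn_new" into the address hash unless an entry with the same
 * MAC address already exists.  On a duplicate, any pending delete
 * action on the existing entry is cancelled, "hn_new" is freed and
 * false is returned.
 */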
static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

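/*
 * Delete every flow rule installed for this address; "tt_vec" carries
 * one bit per traffic type for which a rule currently exists.
 */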
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

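/*
 * Map an address and match type to the set of traffic types (TTs)
 * that need a steering rule, returned as a bitmask.
 */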
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

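/*
 * Install one forwarding rule per traffic type selected by
 * mlx5e_get_tt_vec().  The match criteria and value buffers "mc" and
 * "mv" are filled in incrementally, DMAC first, then ethertype, then
 * IP protocol, so later rules reuse the fields set by earlier ones.
 */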
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

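/*
 * Push the currently active VLAN list into the NIC vport context,
 * truncating it to the device limit if necessary.
 */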
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

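/*
 * Build and install a single VLAN steering rule; all VLAN rules
 * forward into the main flow table.
 */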
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

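/* Apply the pending add or delete action recorded on a hash node. */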
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

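/*
 * Snapshot the unicast and multicast addresses of the network
 * interface into the private hash tables.  Placeholder nodes are
 * preallocated outside of the interface address locks; if the
 * interface gained addresses while they were being copied, everything
 * is freed and the whole procedure restarts.
 */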
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	bool success = false;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
retry:
	num = 1;

	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		num++;
	}
	if_addr_runlock(ifp);

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		num++;
	}
	if_maddr_runlock(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	if (hn == NULL)
		goto cleanup;

	ether_addr_copy(hn->ai.addr,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		hn = mlx5e_move_hn(&head_free, &head_uc);
		if (hn == NULL)
			break;
		ether_addr_copy(hn->ai.addr,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);
	if (ifa != NULL)
		goto cleanup;

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		hn = mlx5e_move_hn(&head_free, &head_mc);
		if (hn == NULL)
			break;
		ether_addr_copy(hn->ai.addr,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
	if (ifma != NULL)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index, hn->ai.addr);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

	success = true;

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (success == false)
		goto retry;
}

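/*
 * Flatten a unicast or multicast address hash into a linear array for
 * the firmware, pushing our own (or the broadcast) address first.
 */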
static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

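/*
 * Push the unicast or multicast address list into the NIC vport
 * context, truncating it to the device limit if necessary.
 */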
static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

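/*
 * Mark all cached addresses for deletion, re-read the addresses from
 * the network interface while it is open, and then execute the
 * resulting add and delete actions.
 */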
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

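/*
 * Recompute the receive filtering state: the promiscuous, allmulti
 * and broadcast rules are added or removed according to the interface
 * flags, the L2 address rules are refreshed, and the result is
 * propagated to the NIC vport context.
 */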
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

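/*
 * Create the flow groups of the main table.  Group order matters:
 * the tunnel-capable group must come first, followed by the groups
 * without a DMAC match, the full-DMAC groups and finally the
 * multicast-bit (dmac[0]) groups.
 */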
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
				       MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
				       MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
			GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

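/*
 * Create all flow tables used by the driver.  Received packets first
 * hit the VLAN table, whose rules forward into the main table; the
 * inner RSS table matches on inner packet headers.
 */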
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return (0);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{

	mlx5e_handle_ifp_addr(priv);
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}