xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 86aa9539fef591a363b06a0ebd3aa7a07f4c1579)
/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

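/* How the destination MAC address of an L2 steering rule is matched. */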
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

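/* Ethernet address classes, as returned by mlx5e_get_eth_addr_type(). */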
enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

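/* Pending action for a cached address; applied by mlx5e_execute_action(). */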
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	struct mlx5e_eth_addr_info ai;
};

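/* Hash an Ethernet address on its least significant byte. */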
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

static void
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    const u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			return;
		}
	}

	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
	if (hn == NULL)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

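/*
 * Classify an Ethernet address as unicast, IPv4 multicast (01:00:5e with
 * the high bit of the fourth byte clear), IPv6 multicast (33:33 prefix)
 * or other multicast.
 */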
static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

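/*
 * Compute the set of traffic types (MLX5E_TT_*) that must be covered by
 * flow rules for the given address and match type.
 */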
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

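/*
 * Install one flow rule per required traffic type into the main flow
 * table, forwarding matching packets to the corresponding TIR.
 */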
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

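/* Push the currently active VLAN list into the NIC vport context. */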
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		if_printf(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

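/*
 * Add a VLAN steering rule forwarding matching traffic from the VLAN
 * flow table to the main flow table.
 */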
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		if_printf(priv->ifp, "%s: add rule failed\n", __func__);
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

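/* Re-read the interface unicast and multicast addresses into the hashes. */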
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

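/* Flatten the UC or MC address hash into an array for the vport command. */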
static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

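/* Program the UC or MC address list into the NIC vport context. */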
static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		if_printf(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		if_printf(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

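/*
 * Mark all cached addresses for deletion, re-sync from the interface and
 * execute the resulting add/delete actions.
 */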
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

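/* Recompute promiscuous/allmulti/broadcast state and update flow rules. */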
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

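/*
 * Flow group layout for the main flow table; all rules in a group share
 * one match criteria mask, and the sizes below bound the rules per group.
 */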
#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
				       MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
				       MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
			GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

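/* Create the VLAN, main and inner RSS flow tables and their groups. */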
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return (0);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}