xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 2787f8c39c60f7510f9bc04ec46fe0b37ece0e3e)
1 /*-
2  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28 
29 #include <dev/mlx5/mlx5_en/en.h>
30 #include <dev/mlx5/mlx5_accel/ipsec.h>
31 
32 #include <linux/list.h>
33 #include <dev/mlx5/fs.h>
34 #include <dev/mlx5/mpfs.h>
35 #include <dev/mlx5/mlx5_core/fs_tcp.h>
36 
37 /*
38  * The flow tables and their rules define the packet processing on
39  * receive.  Currently the following structure is set up to handle
40  * different offloads, such as TLS RX offload, VLAN decapsulation,
41  * packet classification, RSS hashing and VxLAN checksum offloading:
42  *
43  *   +=========+       +=========+      +=================+
44  *   |TCP/IPv4 |       |TCP/IPv4 |      |TCP/IPv4 Match   |
45  *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
46  *   |         |       |Catch-all|\     |                 |
47  *   +=========+       +=========+|     +=================+
48  *                                |
49  *       +------------------------+
50  *       V
51  *   +=========+       +=========+      +=================+
52  *   |TCP/IPv6 |       |TCP/IPv6 |      |TCP/IPv6 Match   |
53  *   |Flowtable|------>|         |----->|Outer Proto Match|=====> TLS TIR n
54  *   |         |       |Catch-all|\     |                 |
55  *   +=========+       +=========+|     +=================+
56  *                                |
57  *       +------------------------+
58  *       V
59  *   +=========+       +=========+      +=================+
60  *   |VLAN ft: |       |VxLAN    |      |VxLAN Main       |
61  *   |CTAG/STAG|------>|      VNI|----->|Inner Proto Match|=====> Inner TIR n
62  *   |VID/noVID|/      |Catch-all|\     |                 |
63  *   +=========+       +=========+|     +=================+
64  *                                |
65  *                                |
66  *                                |
67  *                                v
68  *                      +=================+
69  *                      |Main             |
70  *                      |Outer Proto Match|=====> TIR n
71  *                      |                 |
72  *                      +=================+
73  *
74  * The path through the flow rules directs each packet into the
75  * appropriate TIR, according to:
76  * - VLAN encapsulation
77  * - Outer protocol
78  * - Presence of an inner protocol
79  */
80 
81 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
82 
83 enum {
84 	MLX5E_FULLMATCH = 0,
85 	MLX5E_ALLMULTI = 1,
86 	MLX5E_PROMISC = 2,
87 };
88 
89 enum {
90 	MLX5E_UC = 0,
91 	MLX5E_MC_IPV4 = 1,
92 	MLX5E_MC_IPV6 = 2,
93 	MLX5E_MC_OTHER = 3,
94 };
95 
96 enum {
97 	MLX5E_ACTION_NONE = 0,
98 	MLX5E_ACTION_ADD = 1,
99 	MLX5E_ACTION_DEL = 2,
100 };
101 
102 struct mlx5e_eth_addr_hash_node {
103 	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
104 	u8	action;
105 	u32	mpfs_index;
106 	struct mlx5e_eth_addr_info ai;
107 };
108 
109 static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
110 
111 static inline int
112 mlx5e_hash_eth_addr(const u8 * addr)
113 {
114 	return (addr[5]);
115 }
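
/*
 * The hash above is simply the last octet of the MAC address, which
 * assumes MLX5E_ETH_ADDR_HASH_SIZE buckets covering all 256 possible
 * byte values.  A minimal lookup sketch using this scheme
 * (illustrative only, not part of the driver):
 *
 *	u32 ix = mlx5e_hash_eth_addr(mac);
 *	LIST_FOREACH(hn, &hash[ix], hlist)
 *		if (bcmp(hn->ai.addr, mac, ETHER_ADDR_LEN) == 0)
 *			break;
 */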
116 
117 static bool
118 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
119     struct mlx5e_eth_addr_hash_node *hn_new)
120 {
121 	struct mlx5e_eth_addr_hash_node *hn;
122 	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);
123 
124 	LIST_FOREACH(hn, &hash[ix], hlist) {
125 		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
126 			if (hn->action == MLX5E_ACTION_DEL)
127 				hn->action = MLX5E_ACTION_NONE;
128 			free(hn_new, M_MLX5EN);
129 			return (false);
130 		}
131 	}
132 	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
133 	return (true);
134 }
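
/*
 * mlx5e_add_eth_addr_to_hash() returns true when hn_new was inserted.
 * If the address is already cached, the new node is freed, false is
 * returned, and an entry that was pending deletion is resurrected by
 * resetting its action to MLX5E_ACTION_NONE.  This is what keeps
 * unchanged addresses alive across mlx5e_handle_ifp_addr() cycles.
 */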
135 
136 static void
137 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
138 {
139 	LIST_REMOVE(hn, hlist);
140 	free(hn, M_MLX5EN);
141 }
142 
143 static void
144 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
145     struct mlx5e_eth_addr_info *ai)
146 {
147 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
148 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
149 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
150 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
151 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
152 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
153 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
154 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
155 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6]);
156 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4]);
157 	mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_ANY]);
158 }
159 
160 static int
161 mlx5e_get_eth_addr_type(const u8 * addr)
162 {
163 	if (ETHER_IS_MULTICAST(addr) == 0)
164 		return (MLX5E_UC);
165 
166 	if ((addr[0] == 0x01) &&
167 	    (addr[1] == 0x00) &&
168 	    (addr[2] == 0x5e) &&
169 	    !(addr[3] & 0x80))
170 		return (MLX5E_MC_IPV4);
171 
172 	if ((addr[0] == 0x33) &&
173 	    (addr[1] == 0x33))
174 		return (MLX5E_MC_IPV6);
175 
176 	return (MLX5E_MC_OTHER);
177 }
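
/*
 * Classification examples for the above (addresses are illustrative):
 *	01:00:5e:01:02:03 -> MLX5E_MC_IPV4 (IPv4 multicast, RFC 1112)
 *	33:33:00:00:00:01 -> MLX5E_MC_IPV6 (IPv6 multicast, RFC 2464)
 *	01:80:c2:00:00:0e -> MLX5E_MC_OTHER (other L2 multicast, e.g. LLDP)
 *	00:25:90:ab:cd:ef -> MLX5E_UC (any unicast address)
 */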
178 
179 static	u32
180 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
181 {
182 	int eth_addr_type;
183 	u32 ret;
184 
185 	switch (type) {
186 	case MLX5E_FULLMATCH:
187 		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
188 		switch (eth_addr_type) {
189 		case MLX5E_UC:
190 			ret =
191 			    (1 << MLX5E_TT_IPV4_TCP) |
192 			    (1 << MLX5E_TT_IPV6_TCP) |
193 			    (1 << MLX5E_TT_IPV4_UDP) |
194 			    (1 << MLX5E_TT_IPV6_UDP) |
195 			    (1 << MLX5E_TT_IPV4) |
196 			    (1 << MLX5E_TT_IPV6) |
197 			    (1 << MLX5E_TT_ANY) |
198 			    0;
199 			break;
200 
201 		case MLX5E_MC_IPV4:
202 			ret =
203 			    (1 << MLX5E_TT_IPV4_UDP) |
204 			    (1 << MLX5E_TT_IPV4) |
205 			    0;
206 			break;
207 
208 		case MLX5E_MC_IPV6:
209 			ret =
210 			    (1 << MLX5E_TT_IPV6_UDP) |
211 			    (1 << MLX5E_TT_IPV6) |
212 			    0;
213 			break;
214 
215 		default:
216 			ret =
217 			    (1 << MLX5E_TT_ANY) |
218 			    0;
219 			break;
220 		}
221 		break;
222 
223 	case MLX5E_ALLMULTI:
224 		ret =
225 		    (1 << MLX5E_TT_IPV4_UDP) |
226 		    (1 << MLX5E_TT_IPV6_UDP) |
227 		    (1 << MLX5E_TT_IPV4) |
228 		    (1 << MLX5E_TT_IPV6) |
229 		    (1 << MLX5E_TT_ANY) |
230 		    0;
231 		break;
232 
233 	default:			/* MLX5E_PROMISC */
234 		ret =
235 		    (1 << MLX5E_TT_IPV4_TCP) |
236 		    (1 << MLX5E_TT_IPV6_TCP) |
237 		    (1 << MLX5E_TT_IPV4_UDP) |
238 		    (1 << MLX5E_TT_IPV6_UDP) |
239 		    (1 << MLX5E_TT_IPV4) |
240 		    (1 << MLX5E_TT_IPV6) |
241 		    (1 << MLX5E_TT_ANY) |
242 		    0;
243 		break;
244 	}
245 
246 	return (ret);
247 }
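
/*
 * Example: for an IPv4 multicast address installed with
 * MLX5E_FULLMATCH, the returned vector is
 *	(1 << MLX5E_TT_IPV4_UDP) | (1 << MLX5E_TT_IPV4)
 * so only the IPv4 and IPv4/UDP rules are created for it; TCP flows
 * to a multicast group are not meaningful and get no rule.
 */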
248 
249 static int
250 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
251     struct mlx5e_eth_addr_info *ai, int type,
252     struct mlx5_flow_spec *spec)
253 {
254 	struct mlx5_flow_destination dest = {};
255 	u8 mc_enable = 0;
256 	struct mlx5_flow_handle **rule_p;
257 	struct mlx5_flow_table *ft = priv->fts.main.t;
258 	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
259 				   outer_headers.dmac_47_16);
260 	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
261 				   outer_headers.dmac_47_16);
262 	u32 *tirn = priv->tirn;
263 	u32 tt_vec;
264 	int err = 0;
265 	struct mlx5_flow_act flow_act = {
266 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
267 	};
268 	u8 *mc;
269 	u8 *mv;
270 
271 	mv = (u8 *)spec->match_value;
272 	mc = (u8 *)spec->match_criteria;
273 
274 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
275 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
276 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
277 
278 	switch (type) {
279 	case MLX5E_FULLMATCH:
280 		mc_enable = MLX5_MATCH_OUTER_HEADERS;
281 		memset(mc_dmac, 0xff, ETH_ALEN);
282 		ether_addr_copy(mv_dmac, ai->addr);
283 		break;
284 
285 	case MLX5E_ALLMULTI:
286 		mc_enable = MLX5_MATCH_OUTER_HEADERS;
287 		mc_dmac[0] = 0x01;
288 		mv_dmac[0] = 0x01;
289 		break;
290 
291 	case MLX5E_PROMISC:
292 		break;
293 	default:
294 		break;
295 	}
296 
297 	tt_vec = mlx5e_get_tt_vec(ai, type);
298 
299 	spec->match_criteria_enable = mc_enable;
300 	if (tt_vec & BIT(MLX5E_TT_ANY)) {
301 		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
302 		dest.tir_num = tirn[MLX5E_TT_ANY];
303 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
304 		if (IS_ERR_OR_NULL(*rule_p))
305 			goto err_del_ai;
306 	}
307 
308 	mc_enable = MLX5_MATCH_OUTER_HEADERS;
309 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
310 
311 	spec->match_criteria_enable = mc_enable;
312 	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
313 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
314 		dest.tir_num = tirn[MLX5E_TT_IPV4];
315 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
316 			 ETHERTYPE_IP);
317 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
318 		if (IS_ERR_OR_NULL(*rule_p))
319 			goto err_del_ai;
320 	}
321 
322 	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
323 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
324 		dest.tir_num = tirn[MLX5E_TT_IPV6];
325 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
326 			 ETHERTYPE_IPV6);
327 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
328 		if (IS_ERR_OR_NULL(*rule_p))
329 			goto err_del_ai;
330 	}
331 
332 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
333 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
334 
335 	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
336 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
337 		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
338 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
339 			 ETHERTYPE_IP);
340 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
341 		if (IS_ERR_OR_NULL(*rule_p))
342 			goto err_del_ai;
343 	}
344 
345 	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
346 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
347 		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
348 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
349 			 ETHERTYPE_IPV6);
350 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
351 		if (IS_ERR_OR_NULL(*rule_p))
352 			goto err_del_ai;
353 	}
354 
355 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
356 
357 	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
358 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
359 		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
360 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
361 			 ETHERTYPE_IP);
362 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
363 		if (IS_ERR_OR_NULL(*rule_p))
364 			goto err_del_ai;
365 	}
366 
367 	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
368 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
369 		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
370 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
371 			 ETHERTYPE_IPV6);
372 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
373 		if (IS_ERR_OR_NULL(*rule_p))
374 			goto err_del_ai;
375 	}
376 
377 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
378 
379 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
380 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
381 		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
382 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
383 			 ETHERTYPE_IP);
384 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
385 		if (IS_ERR_OR_NULL(*rule_p))
386 			goto err_del_ai;
387 	}
388 
389 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
390 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
391 		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
392 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
393 			 ETHERTYPE_IPV6);
394 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
395 		if (IS_ERR_OR_NULL(*rule_p))
396 			goto err_del_ai;
397 	}
398 
399 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
400 
401 	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
402 		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
403 		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
404 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
405 			 ETHERTYPE_IP);
406 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
407 		if (IS_ERR_OR_NULL(*rule_p))
408 			goto err_del_ai;
409 	}
410 
411 	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
412 		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
413 		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
414 		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
415 			 ETHERTYPE_IPV6);
416 		*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
417 		if (IS_ERR_OR_NULL(*rule_p))
418 			goto err_del_ai;
419 	}
420 
421 	return (0);
422 
423 err_del_ai:
424 	err = PTR_ERR(*rule_p);
425 	*rule_p = NULL;
426 	mlx5e_del_eth_addr_from_flow_table(priv, ai);
427 
428 	return (err);
429 }
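
/*
 * Note the pattern in mlx5e_add_eth_addr_rule_sub() above: a single
 * flow spec is reused for every rule.  The match criteria only
 * accumulate (dmac, then ethertype, then ip_protocol), while each
 * MLX5_SET() overwrites the previous match value, so the rules must
 * be created exactly in this order, from the least specific
 * (MLX5E_TT_ANY) to the most specific ones.
 */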
430 
431 static int
432 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
433     struct mlx5e_eth_addr_info *ai, int type)
434 {
435 	struct mlx5_flow_spec *spec;
436 	int err = 0;
437 
438 	spec = mlx5_vzalloc(sizeof(*spec));
439 	if (!spec) {
440 		mlx5_en_err(priv->ifp, "alloc failed\n");
441 		err = -ENOMEM;
442 		goto add_eth_addr_rule_out;
443 	}
444 	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, spec);
445 
446 add_eth_addr_rule_out:
447 	kvfree(spec);
448 
449 	return (err);
450 }
451 
452 static void
453 mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
454 {
455 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
456 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
457 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
458 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
459 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
460 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
461 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
462 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
463 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
464 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
465 	mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
466 }
467 
468 static int
469 mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv,
470 			       struct mlx5_flow_spec *spec)
471 {
472 	struct mlx5_flow_destination dest = {};
473 	struct mlx5_flow_handle **rule_p;
474 	struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
475 	u32 *tirn = priv->tirn_inner_vxlan;
476 	struct mlx5_flow_act flow_act = {
477 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
478 	};
479 	int err = 0;
480 	u8 *mc;
481 	u8 *mv;
482 
483 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
484 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
485 
486 	mc = (u8 *)spec->match_criteria;
487 	mv = (u8 *)spec->match_value;
488 
489 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
490 
491 	spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
492 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
493 
494 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
495 	dest.tir_num = tirn[MLX5E_TT_IPV4];
496 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
497 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
498 	if (IS_ERR_OR_NULL(*rule_p))
499 		goto err_del_ai;
500 
501 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
502 	dest.tir_num = tirn[MLX5E_TT_IPV6];
503 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
504 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
505 	if (IS_ERR_OR_NULL(*rule_p))
506 		goto err_del_ai;
507 
508 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
509 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);
510 
511 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
512 	dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
513 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
514 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
515 	if (IS_ERR_OR_NULL(*rule_p))
516 		goto err_del_ai;
517 
518 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
519 	dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
520 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
521 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
522 	if (IS_ERR_OR_NULL(*rule_p))
523 		goto err_del_ai;
524 
525 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);
526 
527 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
528 	dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
529 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
530 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
531 	if (IS_ERR_OR_NULL(*rule_p))
532 		goto err_del_ai;
533 
534 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
535 	dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
536 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
537 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
538 	if (IS_ERR_OR_NULL(*rule_p))
539 		goto err_del_ai;
540 
541 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);
542 
543 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
544 	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
545 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
546 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
547 	if (IS_ERR_OR_NULL(*rule_p))
548 		goto err_del_ai;
549 
550 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
551 	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
552 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
553 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
554 	if (IS_ERR_OR_NULL(*rule_p))
555 		goto err_del_ai;
556 
557 	MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);
558 
559 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
560 	dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
561 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
562 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
563 	if (IS_ERR_OR_NULL(*rule_p))
564 			goto err_del_ai;
565 
566 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
567 	dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
568 	MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
569 		 ETHERTYPE_IPV6);
570 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
571 	if (IS_ERR_OR_NULL(*rule_p))
572 		goto err_del_ai;
573 
574 	spec->match_criteria_enable = 0;
575 	memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
576 	memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
577 	rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
578 	dest.tir_num = tirn[MLX5E_TT_ANY];
579 	*rule_p = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
580 	if (IS_ERR_OR_NULL(*rule_p))
581 		goto err_del_ai;
582 
583 	return (0);
584 
585 err_del_ai:
586 	err = PTR_ERR(*rule_p);
587 	*rule_p = NULL;
588 	mlx5e_del_main_vxlan_rules(priv);
589 
590 	return (err);
591 }
592 
593 static int
594 mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
595 {
596 	struct mlx5_flow_spec *spec;
597 	int err = 0;
598 
599 	spec = mlx5_vzalloc(sizeof(*spec));
600 	if (!spec) {
601 		mlx5_en_err(priv->ifp, "alloc failed\n");
602 		err = -ENOMEM;
603 		goto add_main_vxlan_rules_out;
604 	}
605 	err = mlx5e_add_main_vxlan_rules_sub(priv, spec);
606 
607 add_main_vxlan_rules_out:
608 	kvfree(spec);
609 
610 	return (err);
611 }
612 
613 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
614 {
615 	if_t ifp = priv->ifp;
616 	int max_list_size;
617 	int list_size;
618 	u16 *vlans;
619 	int vlan;
620 	int err;
621 	int i;
622 
623 	list_size = 0;
624 	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
625 		list_size++;
626 
627 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
628 
629 	if (list_size > max_list_size) {
630 		mlx5_en_err(ifp,
631 			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
632 			    list_size, max_list_size);
633 		list_size = max_list_size;
634 	}
635 
636 	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
637 	if (!vlans)
638 		return (-ENOMEM);
639 
640 	i = 0;
641 	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
642 		if (i >= list_size)
643 			break;
644 		vlans[i++] = vlan;
645 	}
646 
647 	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
648 	if (err)
649 		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
650 			   err);
651 
652 	kfree(vlans);
653 	return (err);
654 }
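
/*
 * Example: with log_max_vlan_list == 5 the device accepts at most
 * 1 << 5 == 32 VLAN IDs per vport; any active VLANs beyond that are
 * silently not offloaded, and the warning above is the only
 * indication.
 */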
655 
656 enum mlx5e_vlan_rule_type {
657 	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
658 	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
659 	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
660 	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
661 };
662 
663 static int
664 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
665     enum mlx5e_vlan_rule_type rule_type, u16 vid,
666     struct mlx5_flow_spec *spec)
667 {
668 	struct mlx5_flow_table *ft = priv->fts.vlan.t;
669 	struct mlx5_flow_destination dest = {};
670 	struct mlx5_flow_handle **rule_p;
671 	int err = 0;
672 	struct mlx5_flow_act flow_act = {
673 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
674 	};
675 	u8 *mv;
676 	u8 *mc;
677 
678 	mv = (u8 *)spec->match_value;
679 	mc = (u8 *)spec->match_criteria;
680 
681 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
682 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
683 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
684 	dest.ft = priv->fts.vxlan.t;
685 
686 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
687 
688 	switch (rule_type) {
689 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
690 		rule_p = &priv->vlan.untagged_ft_rule;
691 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
692 		break;
693 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
694 		rule_p = &priv->vlan.any_cvlan_ft_rule;
695 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
696 		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
697 		break;
698 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
699 		rule_p = &priv->vlan.any_svlan_ft_rule;
700 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
701 		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
702 		break;
703 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
704 		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
705 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
706 		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
707 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
708 		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
709 		mlx5e_vport_context_update_vlans(priv);
710 		break;
711 	}
712 
713 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
714 	if (IS_ERR(*rule_p)) {
715 		err = PTR_ERR(*rule_p);
716 		*rule_p = NULL;
717 		mlx5_en_err(priv->ifp, "add rule failed\n");
718 	}
719 
720 	return (err);
721 }
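
/*
 * Match summary for the rule types above:
 *	UNTAGGED     - cvlan_tag in the criteria, value left 0 (no C-TAG)
 *	ANY_CTAG_VID - cvlan_tag == 1, VID not matched
 *	ANY_STAG_VID - svlan_tag == 1, VID not matched
 *	MATCH_VID    - cvlan_tag == 1 and first_vid == vid
 * All of them forward to the VxLAN flow table, whose catch-all rule
 * in turn falls through to the main table for non-VxLAN traffic.
 */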
722 
723 static int
724 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
725     enum mlx5e_vlan_rule_type rule_type, u16 vid)
726 {
727 	struct mlx5_flow_spec *spec;
728 	int err = 0;
729 
730 	spec = mlx5_vzalloc(sizeof(*spec));
731 	if (!spec) {
732 		mlx5_en_err(priv->ifp, "alloc failed\n");
733 		err = -ENOMEM;
734 		goto add_vlan_rule_out;
735 	}
736 
737 	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, spec);
738 
739 add_vlan_rule_out:
740 	kvfree(spec);
741 
742 	return (err);
743 }
744 
745 static void
746 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
747     enum mlx5e_vlan_rule_type rule_type, u16 vid)
748 {
749 	switch (rule_type) {
750 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
751 		mlx5_del_flow_rules(&priv->vlan.untagged_ft_rule);
752 		break;
753 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
754 		mlx5_del_flow_rules(&priv->vlan.any_cvlan_ft_rule);
755 		break;
756 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
757 		mlx5_del_flow_rules(&priv->vlan.any_svlan_ft_rule);
758 		break;
759 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
760 		mlx5_del_flow_rules(&priv->vlan.active_vlans_ft_rule[vid]);
761 		mlx5e_vport_context_update_vlans(priv);
762 		break;
763 	default:
764 		break;
765 	}
766 }
767 
768 static void
769 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
770 {
771 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
772 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
773 }
774 
775 static int
776 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
777 {
778 	int err;
779 
780 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
781 	if (err)
782 		return (err);
783 
784 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
785 	if (err)
786 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
787 
788 	return (err);
789 }
790 
791 void
792 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
793 {
794 	if (priv->vlan.filter_disabled) {
795 		priv->vlan.filter_disabled = false;
796 		if (if_getflags(priv->ifp) & IFF_PROMISC)
797 			return;
798 		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
799 			mlx5e_del_any_vid_rules(priv);
800 	}
801 }
802 
803 void
804 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
805 {
806 	if (!priv->vlan.filter_disabled) {
807 		priv->vlan.filter_disabled = true;
808 		if (if_getflags(priv->ifp) & IFF_PROMISC)
809 			return;
810 		if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
811 			mlx5e_add_any_vid_rules(priv);
812 	}
813 }
814 
815 void
816 mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
817 {
818 	struct mlx5e_priv *priv = arg;
819 
820 	if (ifp != priv->ifp)
821 		return;
822 
823 	PRIV_LOCK(priv);
824 	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
825 	    test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
826 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
827 	PRIV_UNLOCK(priv);
828 }
829 
830 void
831 mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
832 {
833 	struct mlx5e_priv *priv = arg;
834 
835 	if (ifp != priv->ifp)
836 		return;
837 
838 	PRIV_LOCK(priv);
839 	clear_bit(vid, priv->vlan.active_vlans);
840 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
841 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
842 	PRIV_UNLOCK(priv);
843 }
844 
845 static int
846 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
847 {
848 	int err;
849 	int i;
850 
851 	set_bit(0, priv->vlan.active_vlans);
852 	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
853 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
854 					  i);
855 		if (err)
856 			goto error;
857 	}
858 
859 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
860 	if (err)
861 		goto error;
862 
863 	if (priv->vlan.filter_disabled) {
864 		err = mlx5e_add_any_vid_rules(priv);
865 		if (err)
866 			goto error;
867 	}
868 	return (0);
869 error:
870 	mlx5e_del_all_vlan_rules(priv);
871 	return (err);
872 }
873 
874 static void
875 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
876 {
877 	int i;
878 
879 	if (priv->vlan.filter_disabled)
880 		mlx5e_del_any_vid_rules(priv);
881 
882 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
883 
884 	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
885 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
886 	clear_bit(0, priv->vlan.active_vlans);
887 }
888 
889 #define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
890 	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
891 		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
892 
893 static void
894 mlx5e_execute_action(struct mlx5e_priv *priv,
895     struct mlx5e_eth_addr_hash_node *hn)
896 {
897 	switch (hn->action) {
898 	case MLX5E_ACTION_ADD:
899 		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
900 		hn->action = MLX5E_ACTION_NONE;
901 		break;
902 
903 	case MLX5E_ACTION_DEL:
904 		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
905 		if (hn->mpfs_index != -1U)
906 			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
907 		mlx5e_del_eth_addr_from_hash(hn);
908 		break;
909 
910 	default:
911 		break;
912 	}
913 }
914 
915 static struct mlx5e_eth_addr_hash_node *
916 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
917 {
918 	struct mlx5e_eth_addr_hash_node *hn;
919 
920 	hn = LIST_FIRST(fh);
921 	if (hn != NULL) {
922 		LIST_REMOVE(hn, hlist);
923 		LIST_INSERT_HEAD(uh, hn, hlist);
924 	}
925 	return (hn);
926 }
927 
928 static struct mlx5e_eth_addr_hash_node *
929 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
930 {
931 	struct mlx5e_eth_addr_hash_node *hn;
932 
933 	hn = LIST_FIRST(fh);
934 	if (hn != NULL)
935 		LIST_REMOVE(hn, hlist);
936 	return (hn);
937 }
938 
939 struct mlx5e_copy_addr_ctx {
940 	struct mlx5e_eth_addr_hash_head *free;
941 	struct mlx5e_eth_addr_hash_head *fill;
942 	bool success;
943 };
944 
945 static u_int
946 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
947 {
948 	struct mlx5e_copy_addr_ctx *ctx = arg;
949 	struct mlx5e_eth_addr_hash_node *hn;
950 
951 	hn = mlx5e_move_hn(ctx->free, ctx->fill);
952 	if (hn == NULL) {
953 		ctx->success = false;
954 		return (0);
955 	}
956 	ether_addr_copy(hn->ai.addr, LLADDR(sdl));
957 
958 	return (1);
959 }
960 
961 static void
962 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
963 {
964 	struct mlx5e_copy_addr_ctx ctx;
965 	struct mlx5e_eth_addr_hash_head head_free;
966 	struct mlx5e_eth_addr_hash_head head_uc;
967 	struct mlx5e_eth_addr_hash_head head_mc;
968 	struct mlx5e_eth_addr_hash_node *hn;
969 	if_t ifp = priv->ifp;
970 	size_t x;
971 	size_t num;
972 
973 	PRIV_ASSERT_LOCKED(priv);
974 
975 retry:
976 	LIST_INIT(&head_free);
977 	LIST_INIT(&head_uc);
978 	LIST_INIT(&head_mc);
979 	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);
980 
981 	/* allocate placeholders */
982 	for (x = 0; x != num; x++) {
983 		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
984 		hn->action = MLX5E_ACTION_ADD;
985 		hn->mpfs_index = -1U;
986 		LIST_INSERT_HEAD(&head_free, hn, hlist);
987 	}
988 
989 	hn = mlx5e_move_hn(&head_free, &head_uc);
990 	MPASS(hn != NULL);
991 
992 	ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
993 
994 	ctx.free = &head_free;
995 	ctx.fill = &head_uc;
996 	ctx.success = true;
997 	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
998 	if (ctx.success == false)
999 		goto cleanup;
1000 
1001 	ctx.fill = &head_mc;
1002 	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
1003 	if (ctx.success == false)
1004 		goto cleanup;
1005 
1006 	/* insert L2 unicast addresses into hash list */
1007 
1008 	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
1009 		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
1010 			continue;
1011 		if (hn->mpfs_index == -1U)
1012 			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
1013 			    hn->ai.addr, 0, 0);
1014 	}
1015 
1016 	/* insert L2 multicast addresses into hash list */
1017 
1018 	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
1019 		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
1020 			continue;
1021 	}
1022 
1023 cleanup:
1024 	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
1025 		free(hn, M_MLX5EN);
1026 	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
1027 		free(hn, M_MLX5EN);
1028 	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
1029 		free(hn, M_MLX5EN);
1030 
1031 	if (ctx.success == false)
1032 		goto retry;
1033 }
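
/*
 * The retry loop above handles a race with interface address updates:
 * placeholder nodes are allocated up front based on if_lladdr_count()
 * and if_llmaddr_count(), and if the address lists grow before the
 * copy callbacks run, mlx5e_copy_addr() runs out of free nodes, flags
 * failure, and the whole synchronization restarts from scratch.
 */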
1034 
1035 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
1036 				  u8 addr_array[][ETH_ALEN], int size)
1037 {
1038 	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1039 	if_t ifp = priv->ifp;
1040 	struct mlx5e_eth_addr_hash_node *hn;
1041 	struct mlx5e_eth_addr_hash_head *addr_list;
1042 	struct mlx5e_eth_addr_hash_node *tmp;
1043 	int i = 0;
1044 	int hi;
1045 
1046 	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1047 
1048 	if (is_uc) /* Make sure our own address is pushed first */
1049 		ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
1050 	else if (priv->eth_addr.broadcast_enabled)
1051 		ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
1052 
1053 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
1054 		if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
1055 			continue;
1056 		if (i >= size)
1057 			break;
1058 		ether_addr_copy(addr_array[i++], hn->ai.addr);
1059 	}
1060 }
1061 
1062 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
1063 						 int list_type)
1064 {
1065 	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1066 	struct mlx5e_eth_addr_hash_node *hn;
1067 	u8 (*addr_array)[ETH_ALEN] = NULL;
1068 	struct mlx5e_eth_addr_hash_head *addr_list;
1069 	struct mlx5e_eth_addr_hash_node *tmp;
1070 	int max_size;
1071 	int size;
1072 	int err;
1073 	int hi;
1074 
1075 	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
1076 	max_size = is_uc ?
1077 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1078 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
1079 
1080 	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1081 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
1082 		size++;
1083 
1084 	if (size > max_size) {
1085 		mlx5_en_err(priv->ifp,
1086 			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
1087 			    is_uc ? "UC" : "MC", size, max_size);
1088 		size = max_size;
1089 	}
1090 
1091 	if (size) {
1092 		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
1093 		if (!addr_array) {
1094 			err = -ENOMEM;
1095 			goto out;
1096 		}
1097 		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
1098 	}
1099 
1100 	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
1101 out:
1102 	if (err)
1103 		mlx5_en_err(priv->ifp,
1104 			   "Failed to modify vport %s list err(%d)\n",
1105 			   is_uc ? "UC" : "MC", err);
1106 	kfree(addr_array);
1107 }
1108 
1109 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
1110 {
1111 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1112 
1113 	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
1114 	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
1115 	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
1116 				      ea->allmulti_enabled,
1117 				      ea->promisc_enabled);
1118 }
1119 
1120 static void
1121 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
1122 {
1123 	struct mlx5e_eth_addr_hash_node *hn;
1124 	struct mlx5e_eth_addr_hash_node *tmp;
1125 	int i;
1126 
1127 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1128 	    mlx5e_execute_action(priv, hn);
1129 
1130 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1131 	    mlx5e_execute_action(priv, hn);
1132 }
1133 
1134 static void
1135 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
1136 {
1137 	struct mlx5e_eth_addr_hash_node *hn;
1138 	struct mlx5e_eth_addr_hash_node *tmp;
1139 	int i;
1140 
1141 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1142 	    hn->action = MLX5E_ACTION_DEL;
1143 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1144 	    hn->action = MLX5E_ACTION_DEL;
1145 
1146 	if (rx_mode_enable)
1147 		mlx5e_sync_ifp_addr(priv);
1148 
1149 	mlx5e_apply_ifp_addr(priv);
1150 }
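
/*
 * This is a mark-and-sweep update: every cached address is first
 * marked MLX5E_ACTION_DEL, mlx5e_sync_ifp_addr() then re-adds the
 * addresses still present on the interface (existing entries are
 * reset to MLX5E_ACTION_NONE, new ones enter as MLX5E_ACTION_ADD),
 * and mlx5e_apply_ifp_addr() executes the remaining actions, so only
 * stale entries lose their flow rules and MPFS slots.
 */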
1151 
1152 static void
1153 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
1154 {
1155 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1156 	if_t ndev = priv->ifp;
1157 	int ndev_flags = if_getflags(ndev);
1158 
1159 	bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
1160 	bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
1161 	bool broadcast_enabled = rx_mode_enable;
1162 
1163 	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
1164 	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
1165 	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
1166 	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
1167 	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
1168 	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
1169 
1170 	/* update broadcast address */
1171 	ether_addr_copy(priv->eth_addr.broadcast.addr,
1172 	    if_getbroadcastaddr(priv->ifp));
1173 
1174 	if (enable_promisc) {
1175 		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
1176 		if (!priv->vlan.filter_disabled)
1177 			mlx5e_add_any_vid_rules(priv);
1178 	}
1179 	if (enable_allmulti)
1180 		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
1181 	if (enable_broadcast)
1182 		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
1183 
1184 	mlx5e_handle_ifp_addr(priv, rx_mode_enable);
1185 
1186 	if (disable_broadcast)
1187 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
1188 	if (disable_allmulti)
1189 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
1190 	if (disable_promisc) {
1191 		if (!priv->vlan.filter_disabled)
1192 			mlx5e_del_any_vid_rules(priv);
1193 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
1194 	}
1195 
1196 	ea->promisc_enabled = promisc_enabled;
1197 	ea->allmulti_enabled = allmulti_enabled;
1198 	ea->broadcast_enabled = broadcast_enabled;
1199 
1200 	mlx5e_vport_context_update(priv);
1201 }
1202 
1203 void
1204 mlx5e_set_rx_mode_work(struct work_struct *work)
1205 {
1206 	struct mlx5e_priv *priv =
1207 	    container_of(work, struct mlx5e_priv, set_rx_mode_work);
1208 
1209 	PRIV_LOCK(priv);
1210 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1211 		mlx5e_set_rx_mode_core(priv, true);
1212 	PRIV_UNLOCK(priv);
1213 }
1214 
1215 static void
1216 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
1217 {
1218 	int i;
1219 
1220 	for (i = ft->num_groups - 1; i >= 0; i--) {
1221 		if (!IS_ERR_OR_NULL(ft->g[i]))
1222 			mlx5_destroy_flow_group(ft->g[i]);
1223 		ft->g[i] = NULL;
1224 	}
1225 	ft->num_groups = 0;
1226 }
1227 
1228 static void
1229 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
1230 {
1231 	mlx5e_destroy_groups(ft);
1232 	kfree(ft->g);
1233 	mlx5_destroy_flow_table(ft->t);
1234 	ft->t = NULL;
1235 }
1236 
1237 #define MLX5E_NUM_MAIN_GROUPS	10
1238 #define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
1239 #define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
1240 #define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
1241 #define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
1242 #define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
1243 #define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
1244 #define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
1245 #define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
1246 #define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
1247 #define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
1248 #define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
1249 				 MLX5E_MAIN_GROUP1_SIZE +\
1250 				 MLX5E_MAIN_GROUP2_SIZE +\
1251 				 MLX5E_MAIN_GROUP3_SIZE +\
1252 				 MLX5E_MAIN_GROUP4_SIZE +\
1253 				 MLX5E_MAIN_GROUP5_SIZE +\
1254 				 MLX5E_MAIN_GROUP6_SIZE +\
1255 				 MLX5E_MAIN_GROUP7_SIZE +\
1256 				 MLX5E_MAIN_GROUP8_SIZE +\
1257 				 MLX5E_MAIN_GROUP9_SIZE +\
1258 				 0)
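
/*
 * The group sizes are powers of two and their sum defines the table
 * size: 16+8+2+1 + 16384+8192+2048 + 4+2+1 == 26658 flow entries.
 * Groups 4-6 are by far the largest since they carry the per-MAC
 * rules (full dmac match); groups 0-3 serve the shared tunnel and
 * ethertype rules, and groups 7-9 the multicast catch-alls.
 */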
1259 
1260 static int
1261 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1262 				      int inlen)
1263 {
1264 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1265 	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1266 				match_criteria.outer_headers.dmac_47_16);
1267 	int err;
1268 	int ix = 0;
1269 
1270 	/* Tunnel rules need to be first in this list of groups */
1271 
1272 	/* Start tunnel rules */
1273 	memset(in, 0, inlen);
1274 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1275 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1276 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1277 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1278 	MLX5_SET_CFG(in, start_flow_index, ix);
1279 	ix += MLX5E_MAIN_GROUP0_SIZE;
1280 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1281 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1282 	if (IS_ERR(ft->g[ft->num_groups]))
1283 		goto err_destroy_groups;
1284 	ft->num_groups++;
1285 	/* End Tunnel Rules */
1286 	/* End tunnel rules */
1287 	memset(in, 0, inlen);
1288 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1289 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1290 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1291 	MLX5_SET_CFG(in, start_flow_index, ix);
1292 	ix += MLX5E_MAIN_GROUP1_SIZE;
1293 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1294 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1295 	if (IS_ERR(ft->g[ft->num_groups]))
1296 		goto err_destroy_groups;
1297 	ft->num_groups++;
1298 
1299 	memset(in, 0, inlen);
1300 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1301 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1302 	MLX5_SET_CFG(in, start_flow_index, ix);
1303 	ix += MLX5E_MAIN_GROUP2_SIZE;
1304 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1305 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1306 	if (IS_ERR(ft->g[ft->num_groups]))
1307 		goto err_destroy_groups;
1308 	ft->num_groups++;
1309 
1310 	memset(in, 0, inlen);
1311 	MLX5_SET_CFG(in, start_flow_index, ix);
1312 	ix += MLX5E_MAIN_GROUP3_SIZE;
1313 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1314 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1315 	if (IS_ERR(ft->g[ft->num_groups]))
1316 		goto err_destroy_groups;
1317 	ft->num_groups++;
1318 
1319 	memset(in, 0, inlen);
1320 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1321 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1322 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1323 	memset(dmac, 0xff, ETH_ALEN);
1324 	MLX5_SET_CFG(in, start_flow_index, ix);
1325 	ix += MLX5E_MAIN_GROUP4_SIZE;
1326 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1327 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1328 	if (IS_ERR(ft->g[ft->num_groups]))
1329 		goto err_destroy_groups;
1330 	ft->num_groups++;
1331 
1332 	memset(in, 0, inlen);
1333 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1334 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1335 	memset(dmac, 0xff, ETH_ALEN);
1336 	MLX5_SET_CFG(in, start_flow_index, ix);
1337 	ix += MLX5E_MAIN_GROUP5_SIZE;
1338 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1339 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1340 	if (IS_ERR(ft->g[ft->num_groups]))
1341 		goto err_destroy_groups;
1342 	ft->num_groups++;
1343 
1344 	memset(in, 0, inlen);
1345 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1346 	memset(dmac, 0xff, ETH_ALEN);
1347 	MLX5_SET_CFG(in, start_flow_index, ix);
1348 	ix += MLX5E_MAIN_GROUP6_SIZE;
1349 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1350 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1351 	if (IS_ERR(ft->g[ft->num_groups]))
1352 		goto err_destroy_groups;
1353 	ft->num_groups++;
1354 
1355 	memset(in, 0, inlen);
1356 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1357 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1358 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1359 	dmac[0] = 0x01;
1360 	MLX5_SET_CFG(in, start_flow_index, ix);
1361 	ix += MLX5E_MAIN_GROUP7_SIZE;
1362 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1363 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1364 	if (IS_ERR(ft->g[ft->num_groups]))
1365 		goto err_destroy_groups;
1366 	ft->num_groups++;
1367 
1368 	memset(in, 0, inlen);
1369 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1370 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1371 	dmac[0] = 0x01;
1372 	MLX5_SET_CFG(in, start_flow_index, ix);
1373 	ix += MLX5E_MAIN_GROUP8_SIZE;
1374 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1375 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1376 	if (IS_ERR(ft->g[ft->num_groups]))
1377 		goto err_destroy_groups;
1378 	ft->num_groups++;
1379 
1380 	memset(in, 0, inlen);
1381 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1382 	dmac[0] = 0x01;
1383 	MLX5_SET_CFG(in, start_flow_index, ix);
1384 	ix += MLX5E_MAIN_GROUP9_SIZE;
1385 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1386 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1387 	if (IS_ERR(ft->g[ft->num_groups]))
1388 		goto err_destroy_groups;
1389 	ft->num_groups++;
1390 
1391 	return (0);
1392 
1393 err_destroy_groups:
1394 	err = PTR_ERR(ft->g[ft->num_groups]);
1395 	ft->g[ft->num_groups] = NULL;
1396 	mlx5e_destroy_groups(ft);
1397 
1398 	return (err);
1399 }
1400 
1401 static int
1402 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1403 {
1404 	u32 *in;
1405 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1406 	int err;
1407 
1408 	in = mlx5_vzalloc(inlen);
1409 	if (!in)
1410 		return (-ENOMEM);
1411 
1412 	err = mlx5e_create_main_groups_sub(ft, in, inlen);
1413 
1414 	kvfree(in);
1415 	return (err);
1416 }
1417 
1418 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE	BIT(3)
1419 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE	BIT(3)
1420 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE	BIT(0)
1421 static int
1422 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1423     int inlen)
1424 {
1425 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1426 	int err;
1427 	int ix = 0;
1428 
1429 	memset(in, 0, inlen);
1430 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1431 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1432 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1433 	MLX5_SET_CFG(in, start_flow_index, ix);
1434 	ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1435 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1436 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1437 	if (IS_ERR(ft->g[ft->num_groups]))
1438 		goto err_destroy_groups;
1439 	ft->num_groups++;
1440 
1441 	memset(in, 0, inlen);
1442 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1443 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1444 	MLX5_SET_CFG(in, start_flow_index, ix);
1445 	ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1446 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1447 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1448 	if (IS_ERR(ft->g[ft->num_groups]))
1449 		goto err_destroy_groups;
1450 	ft->num_groups++;
1451 
1452 	memset(in, 0, inlen);
1453 	MLX5_SET_CFG(in, start_flow_index, ix);
1454 	ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1455 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1456 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1457 	if (IS_ERR(ft->g[ft->num_groups]))
1458 		goto err_destroy_groups;
1459 	ft->num_groups++;
1460 
1461 	return (0);
1462 
1463 err_destroy_groups:
1464 	err = PTR_ERR(ft->g[ft->num_groups]);
1465 	ft->g[ft->num_groups] = NULL;
1466 	mlx5e_destroy_groups(ft);
1467 
1468 	return (err);
1469 }
1470 
1471 static int
1472 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1473 {
1474 	u32 *in;
1475 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1476 	int err;
1477 
1478 	in = mlx5_vzalloc(inlen);
1479 	if (!in)
1480 		return (-ENOMEM);
1481 
1482 	err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
1483 
1484 	kvfree(in);
1485 	return (err);
1486 }
1487 
1488 
1489 static int
1490 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1491 {
1492 	struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1493 	    &priv->fts.main;
1494 	struct mlx5_flow_table_attr ft_attr = {};
1495 	int err;
1496 
1497 	ft->num_groups = 0;
1498 	ft_attr.max_fte = MLX5E_MAIN_TABLE_SIZE;
1499 	if (priv->ipsec)
1500 		ft_attr.level = inner_vxlan ? 10 : 12;
1501 	else
1502 		ft_attr.level = inner_vxlan ? 2 : 4;
1503 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
1504 
1505 	if (IS_ERR(ft->t)) {
1506 		err = PTR_ERR(ft->t);
1507 		ft->t = NULL;
1508 		return (err);
1509 	}
1510 	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1511 	if (!ft->g) {
1512 		err = -ENOMEM;
1513 		goto err_destroy_main_flow_table;
1514 	}
1515 
1516 	err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1517 	    mlx5e_create_main_groups(ft);
1518 	if (err)
1519 		goto err_free_g;
1520 	return (0);
1521 
1522 err_free_g:
1523 	kfree(ft->g);
1524 
1525 err_destroy_main_flow_table:
1526 	mlx5_destroy_flow_table(ft->t);
1527 	ft->t = NULL;
1528 
1529 	return (err);
1530 }
1531 
1532 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1533 {
1534 	mlx5e_destroy_flow_table(&priv->fts.main);
1535 }
1536 
1537 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1538 {
1539 	mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
1540 }
1541 
1542 #define MLX5E_NUM_VLAN_GROUPS	3
1543 #define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
1544 #define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
1545 #define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
1546 #define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
1547 				 MLX5E_VLAN_GROUP1_SIZE +\
1548 				 MLX5E_VLAN_GROUP2_SIZE +\
1549 				 0)
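
/*
 * Sizing: group 0 holds BIT(12) == 4096 entries, one per possible
 * 12-bit VLAN ID; group 1 holds the untagged and any-C-TAG rules and
 * group 2 the single any-S-TAG rule, for 4099 entries in total.
 */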
1550 
1551 static int
1552 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1553 				      int inlen)
1554 {
1555 	int err;
1556 	int ix = 0;
1557 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1558 
1559 	memset(in, 0, inlen);
1560 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1561 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1562 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1563 	MLX5_SET_CFG(in, start_flow_index, ix);
1564 	ix += MLX5E_VLAN_GROUP0_SIZE;
1565 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1566 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1567 	if (IS_ERR(ft->g[ft->num_groups]))
1568 		goto err_destroy_groups;
1569 	ft->num_groups++;
1570 
1571 	memset(in, 0, inlen);
1572 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1573 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1574 	MLX5_SET_CFG(in, start_flow_index, ix);
1575 	ix += MLX5E_VLAN_GROUP1_SIZE;
1576 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1577 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1578 	if (IS_ERR(ft->g[ft->num_groups]))
1579 		goto err_destroy_groups;
1580 	ft->num_groups++;
1581 
1582 	memset(in, 0, inlen);
1583 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1584 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1585 	MLX5_SET_CFG(in, start_flow_index, ix);
1586 	ix += MLX5E_VLAN_GROUP2_SIZE;
1587 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1588 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1589 	if (IS_ERR(ft->g[ft->num_groups]))
1590 		goto err_destroy_groups;
1591 	ft->num_groups++;
1592 
1593 	return (0);
1594 
1595 err_destroy_groups:
1596 	err = PTR_ERR(ft->g[ft->num_groups]);
1597 	ft->g[ft->num_groups] = NULL;
1598 	mlx5e_destroy_groups(ft);
1599 
1600 	return (err);
1601 }
1602 
1603 static int
1604 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1605 {
1606 	u32 *in;
1607 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1608 	int err;
1609 
1610 	in = mlx5_vzalloc(inlen);
1611 	if (!in)
1612 		return (-ENOMEM);
1613 
1614 	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
1615 
1616 	kvfree(in);
1617 	return (err);
1618 }
1619 
1620 static int
1621 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1622 {
1623 	struct mlx5e_flow_table *ft = &priv->fts.vlan;
1624 	struct mlx5_flow_table_attr ft_attr = {};
1625 	int err;
1626 
1627 	ft->num_groups = 0;
1628 	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1629 	ft_attr.level = (priv->ipsec) ? 8 : 0;
1630 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
1631 
1632 	if (IS_ERR(ft->t)) {
1633 		err = PTR_ERR(ft->t);
1634 		ft->t = NULL;
1635 		return (err);
1636 	}
1637 	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1638 	if (!ft->g) {
1639 		err = -ENOMEM;
1640 		goto err_destroy_vlan_flow_table;
1641 	}
1642 
1643 	err = mlx5e_create_vlan_groups(ft);
1644 	if (err)
1645 		goto err_free_g;
1646 
1647 	return (0);
1648 
1649 err_free_g:
1650 	kfree(ft->g);
1651 
1652 err_destroy_vlan_flow_table:
1653 	mlx5_destroy_flow_table(ft->t);
1654 	ft->t = NULL;
1655 
1656 	return (err);
1657 }
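
/*
 * When the IPsec RX offload tables are present they occupy the lowest
 * levels of the kernel flow namespace, so every table here is shifted
 * up by eight levels (VLAN 0 -> 8, VxLAN 1 -> 9, inner RSS 3 -> 11).
 * A flow table may only forward to a table with a strictly higher
 * level, which pins down the VLAN -> VxLAN -> main -> inner-RSS
 * traversal order either way.
 */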
1658 
1659 static void
1660 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1661 {
1662 	mlx5e_destroy_flow_table(&priv->fts.vlan);
1663 }
1664 
1665 static int
1666 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
1667     struct mlx5e_vxlan_db_el *el)
1668 {
1669 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1670 	struct mlx5_flow_destination dest = {};
1671 	struct mlx5_flow_handle **rule_p;
1672 	int err = 0;
1673 	struct mlx5_flow_act flow_act = {
1674 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
1675 	};
1676 	u8 *mc;
1677 	u8 *mv;
1678 
1679 	mv = (u8 *)spec->match_value;
1680 	mc = (u8 *)spec->match_criteria;
1681 
1682 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
1683 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
1684 
1685 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1686 	dest.ft = priv->fts.main_vxlan.t;
1687 
1688 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1689 	rule_p = &el->vxlan_ft_rule;
1690 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1691 	MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1692 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1693 	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1694 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1695 	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1696 
1697 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1698 
1699 	if (IS_ERR(*rule_p)) {
1700 		err = PTR_ERR(*rule_p);
1701 		*rule_p = NULL;
1702 		mlx5_en_err(priv->ifp, "add rule failed\n");
1703 	}
1704 
1705 	return (err);
1706 }
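
/*
 * The rule built above matches, in the outer headers only:
 *
 *	ethertype   == el->proto	(ETHERTYPE_IP or ETHERTYPE_IPV6)
 *	ip_protocol == IPPROTO_UDP
 *	udp_dport   == el->port
 *
 * and forwards matching packets to the main_vxlan table for
 * inner-header classification; everything else falls through to this
 * table's catch-all rule.
 */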
1707 
1708 static struct mlx5e_vxlan_db_el *
1709 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1710 {
1711 	struct mlx5e_vxlan_db_el *el;
1712 
1713 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1714 		if (el->proto == proto && el->port == port)
1715 			return (el);
1716 	}
1717 	return (NULL);
1718 }
1719 
1720 static struct mlx5e_vxlan_db_el *
1721 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1722 {
1723 	struct mlx5e_vxlan_db_el *el;
1724 
1725 	el = mlx5_vzalloc(sizeof(*el));
1726 	el->refcount = 1;
1727 	el->proto = proto;
1728 	el->port = port;
1729 	el->vxlan_ft_rule = NULL;
1730 	return (el);
1731 }
1732 
1733 static int
1734 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1735 {
1736 	switch (family) {
1737 	case AF_INET:
1738 		*proto = ETHERTYPE_IP;
1739 		return (0);
1740 	case AF_INET6:
1741 		*proto = ETHERTYPE_IPV6;
1742 		return (0);
1743 	default:
1744 		return (-EINVAL);
1745 	}
1746 }
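
/*
 * Minimal usage sketch (hypothetical caller):
 *
 *	u_int proto;
 *
 *	if (mlx5e_vxlan_family_to_proto(AF_INET, &proto) == 0)
 *		KASSERT(proto == ETHERTYPE_IP, ("unexpected proto"));
 *
 * Unsupported address families yield -EINVAL, matching the negative
 * errno convention used throughout this file.
 */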
1747 
1748 static int
1749 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1750     struct mlx5e_vxlan_db_el *el)
1751 {
1752 	struct mlx5_flow_spec *spec;
1753 	int err;
1754 
1755 	spec = mlx5_vzalloc(sizeof(*spec));
1756 	if (!spec) {
1757 		mlx5_en_err(priv->ifp, "alloc failed\n");
1758 		err = -ENOMEM;
1759 		goto add_vxlan_rule_out;
1760 	}
1761 
1762 	err = mlx5e_add_vxlan_rule_sub(priv, spec, el);
1763 
1764 add_vxlan_rule_out:
1765 	kvfree(spec);
1766 
1767 	return (err);
1768 }
1769 
1770 static int
1771 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1772 {
1773 	struct mlx5e_vxlan_db_el *el;
1774 	u_int proto;
1775 	int err;
1776 
1777 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1778 	if (err != 0)
1779 		return (err);
1780 
1781 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1782 	if (el != NULL) {
1783 		el->refcount++;
1784 		if (el->installed)
1785 			return (0);
1786 	}
1787 	el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
1788 
1789 	if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
1790 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1791 		if (err == 0)
1792 			el->installed = true;
1793 	}
1794 	if (err == 0)
1795 		TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
1796 	else
1797 		kvfree(el);
1798 
1799 	return (err);
1800 }
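
/*
 * Reference counting: the first add for a given (proto, port) pair
 * creates a database element with refcount 1 and, when VxLAN HW
 * checksumming is enabled, installs the steering rule; subsequent adds
 * for an already-installed pair merely bump the refcount.  The rule is
 * torn down again only when mlx5e_del_vxlan_rule() drops the last
 * reference.
 */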
1801 
1802 static int
1803 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv,
1804 				  struct mlx5_flow_spec *spec)
1805 {
1806 	struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1807 	struct mlx5_flow_destination dest = {};
1808 	struct mlx5_flow_handle **rule_p;
1809 	int err = 0;
1810 	struct mlx5_flow_act flow_act = {
1811 		.action = MLX5_FLOW_RULE_FWD_ACTION_DEST,
1812 	};
1813 
1814 	spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
1815 	spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
1816 
1817 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1818 	dest.ft = priv->fts.main.t;
1819 
1820 	rule_p = &priv->fts.vxlan_catchall_ft_rule;
1821 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1822 
1823 	if (IS_ERR(*rule_p)) {
1824 		err = PTR_ERR(*rule_p);
1825 		*rule_p = NULL;
1826 		mlx5_en_err(priv->ifp, "add rule failed\n");
1827 	}
1828 
1829 	return (err);
1830 }
1831 
1832 
1833 static int
1834 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1835 {
1836 	struct mlx5_flow_spec *spec;
1837 	int err;
1838 
1839 	spec = mlx5_vzalloc(sizeof(*spec));
1840 	if (!spec) {
1841 		mlx5_en_err(priv->ifp, "alloc failed\n");
1842 		err = -ENOMEM;
1843 		goto add_vxlan_rule_out;
1844 	}
1845 
1846 	err = mlx5e_add_vxlan_catchall_rule_sub(priv, spec);
1847 
1848 add_vxlan_rule_out:
1849 	kvfree(spec);
1850 
1851 	return (err);
1852 }
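
/*
 * The catch-all rule is added with a completely zeroed flow spec, i.e.
 * with no match criteria at all, so it lands in the VxLAN table's
 * criteria-less flow group and forwards every packet that did not hit
 * a UDP-port rule to the regular (non-VxLAN) main table.
 */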
1853 
1854 int
1855 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1856 {
1857 	struct mlx5e_vxlan_db_el *el;
1858 	int err;
1859 
1860 	err = 0;
1861 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1862 		if (el->installed)
1863 			continue;
1864 		err = mlx5e_add_vxlan_rule_from_db(priv, el);
1865 		if (err != 0)
1866 			break;
1867 		el->installed = true;
1868 	}
1869 
1870 	return (err);
1871 }
1872 
1873 static int
1874 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1875 {
1876 	struct mlx5e_vxlan_db_el *el;
1877 	u_int proto;
1878 	int err;
1879 
1880 	err = mlx5e_vxlan_family_to_proto(family, &proto);
1881 	if (err != 0)
1882 		return (err);
1883 
1884 	el = mlx5e_vxlan_find_db_el(priv, proto, port);
1885 	if (el == NULL)
1886 		return (0);
1887 	if (el->refcount > 1) {
1888 		el->refcount--;
1889 		return (0);
1890 	}
1891 
1892 	if (el->installed)
1893 		mlx5_del_flow_rules(&el->vxlan_ft_rule);
1894 	TAILQ_REMOVE(&priv->vxlan.head, el, link);
1895 	kvfree(el);
1896 	return (0);
1897 }
1898 
1899 void
1900 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1901 {
1902 	struct mlx5e_vxlan_db_el *el;
1903 
1904 	TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1905 		if (!el->installed)
1906 			continue;
1907 		mlx5_del_flow_rules(&el->vxlan_ft_rule);
1908 		el->installed = false;
1909 	}
1910 }
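
/*
 * mlx5e_del_all_vxlan_rules() only uninstalls the hardware rules; the
 * database elements and their reference counts survive, allowing
 * mlx5e_add_all_vxlan_rules() to reinstall the identical set when the
 * flow rules are reopened.
 */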
1911 
1912 static void
1913 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1914 {
1915 	mlx5_del_flow_rules(&priv->fts.vxlan_catchall_ft_rule);
1916 }
1917 
1918 void
1919 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1920     u_int port)
1921 {
1922 	struct mlx5e_priv *priv = arg;
1923 	int err;
1924 
1925 	PRIV_LOCK(priv);
1926 	err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1927 	if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1928 		mlx5e_add_vxlan_rule(priv, family, port);
1929 	PRIV_UNLOCK(priv);
1930 }
1931 
1932 void
1933 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1934     u_int port)
1935 {
1936 	struct mlx5e_priv *priv = arg;
1937 
1938 	PRIV_LOCK(priv);
1939 	if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1940 		mlx5e_del_vxlan_rule(priv, family, port);
1941 	(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
1942 	PRIV_UNLOCK(priv);
1943 }
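
/*
 * mlx5e_vxlan_start() and mlx5e_vxlan_stop() are written as
 * vxlan_start/vxlan_stop eventhandler callbacks (registration is
 * presumed to happen elsewhere in the driver).  Both take PRIV_LOCK
 * and test MLX5E_STATE_FLOW_RULES_READY so that steering rules are
 * only touched while the flow tables exist; the UDP port itself is
 * always (de)registered with the device.
 */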
1944 
1945 #define	MLX5E_VXLAN_GROUP0_SIZE	BIT(3)	/* XXXKIB */
1946 #define	MLX5E_VXLAN_GROUP1_SIZE	BIT(0)
1947 #define	MLX5E_NUM_VXLAN_GROUPS	BIT(1)
1948 #define	MLX5E_VXLAN_TABLE_SIZE	\
1949     (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
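
/*
 * Group 0 leaves room for eight ethertype/UDP-port rules (see the
 * XXXKIB marker above), group 1 holds the single catch-all entry; the
 * table size is again the sum of the group sizes.
 */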
1950 
1951 static int
1952 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1953 				      int inlen)
1954 {
1955 	int err;
1956 	int ix = 0;
1957 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1958 
1959 	memset(in, 0, inlen);
1960 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1961 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1962 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1963 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1964 	MLX5_SET_CFG(in, start_flow_index, ix);
1965 	ix += MLX5E_VXLAN_GROUP0_SIZE;
1966 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1967 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1968 	if (IS_ERR(ft->g[ft->num_groups]))
1969 		goto err_destroy_groups;
1970 	ft->num_groups++;
1971 
1972 	memset(in, 0, inlen);
1973 	MLX5_SET_CFG(in, start_flow_index, ix);
1974 	ix += MLX5E_VXLAN_GROUP1_SIZE;
1975 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
1976 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1977 	if (IS_ERR(ft->g[ft->num_groups]))
1978 		goto err_destroy_groups;
1979 	ft->num_groups++;
1980 
1981 	return (0);
1982 
1983 err_destroy_groups:
1984 	err = PTR_ERR(ft->g[ft->num_groups]);
1985 	ft->g[ft->num_groups] = NULL;
1986 	mlx5e_destroy_groups(ft);
1987 
1988 	return (err);
1989 }
1990 
1991 static int
1992 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
1993 {
1994 	u32 *in;
1995 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1996 	int err;
1997 
1998 	in = mlx5_vzalloc(inlen);
1999 	if (!in)
2000 		return (-ENOMEM);
2001 
2002 	err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
2003 
2004 	kvfree(in);
2005 	return (err);
2006 }
2007 
2008 static int
2009 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2010 {
2011 	struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2012 	struct mlx5_flow_table_attr ft_attr = {};
2013 	int err;
2014 
2015 	ft->num_groups = 0;
2016 	ft_attr.max_fte = MLX5E_VXLAN_TABLE_SIZE;
2017 	ft_attr.level = (priv->ipsec) ? 9 : 1;
2018 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
2019 
2020 	if (IS_ERR(ft->t)) {
2021 		err = PTR_ERR(ft->t);
2022 		ft->t = NULL;
2023 		return (err);
2024 	}
2025 	ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2026 	if (!ft->g) {
2027 		err = -ENOMEM;
2028 		goto err_destroy_vxlan_flow_table;
2029 	}
2030 
2031 	err = mlx5e_create_vxlan_groups(ft);
2032 	if (err)
2033 		goto err_free_g;
2034 
2035 	TAILQ_INIT(&priv->vxlan.head);
2036 	return (0);
2037 
2038 err_free_g:
2039 	kfree(ft->g);
2040 
2041 err_destroy_vxlan_flow_table:
2042 	mlx5_destroy_flow_table(ft->t);
2043 	ft->t = NULL;
2044 
2045 	return (err);
2046 }
2047 
2048 #define MLX5E_NUM_INNER_RSS_GROUPS	3
2049 #define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
2050 #define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
2051 #define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
2052 #define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
2053 					 MLX5E_INNER_RSS_GROUP1_SIZE +\
2054 					 MLX5E_INNER_RSS_GROUP2_SIZE +\
2055 					 0)
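
/*
 * The inner RSS table classifies on the inner (encapsulated) headers:
 * group 0 matches inner ethertype plus IP protocol, group 1 inner
 * ethertype only, and group 2 is the criteria-less catch-all.
 */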
2056 
2057 static int
2058 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2059 					   int inlen)
2060 {
2061 	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
2062 	int err;
2063 	int ix = 0;
2064 
2065 	memset(in, 0, inlen);
2066 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2067 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2068 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2069 	MLX5_SET_CFG(in, start_flow_index, ix);
2070 	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2071 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2072 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2073 	if (IS_ERR(ft->g[ft->num_groups]))
2074 		goto err_destroy_groups;
2075 	ft->num_groups++;
2076 
2077 	memset(in, 0, inlen);
2078 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2079 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2080 	MLX5_SET_CFG(in, start_flow_index, ix);
2081 	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2082 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2083 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2084 	if (IS_ERR(ft->g[ft->num_groups]))
2085 		goto err_destroy_groups;
2086 	ft->num_groups++;
2087 
2088 	memset(in, 0, inlen);
2089 	MLX5_SET_CFG(in, start_flow_index, ix);
2090 	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2091 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
2092 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2093 	if (IS_ERR(ft->g[ft->num_groups]))
2094 		goto err_destroy_groups;
2095 	ft->num_groups++;
2096 
2097 	return (0);
2098 
2099 err_destroy_groups:
2100 	err = PTR_ERR(ft->g[ft->num_groups]);
2101 	ft->g[ft->num_groups] = NULL;
2102 	mlx5e_destroy_groups(ft);
2103 
2104 	return (err);
2105 }
2106 
2107 static int
2108 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2109 {
2110 	u32 *in;
2111 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2112 	int err;
2113 
2114 	in = mlx5_vzalloc(inlen);
2115 	if (!in)
2116 		return (-ENOMEM);
2117 
2118 	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
2119 
2120 	kvfree(in);
2121 	return (err);
2122 }
2123 
2124 static int
2125 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2126 {
2127 	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2128 	struct mlx5_flow_table_attr ft_attr = {};
2129 	int err;
2130 
2131 	ft->num_groups = 0;
2132 	ft_attr.max_fte = MLX5E_INNER_RSS_TABLE_SIZE;
2133 	ft_attr.level = (priv->ipsec) ? 11 : 3;
2134 	ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
2135 
2136 	if (IS_ERR(ft->t)) {
2137 		err = PTR_ERR(ft->t);
2138 		ft->t = NULL;
2139 		return (err);
2140 	}
2141 	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2142 			GFP_KERNEL);
2143 	if (!ft->g) {
2144 		err = -ENOMEM;
2145 		goto err_destroy_inner_rss_flow_table;
2146 	}
2147 
2148 	err = mlx5e_create_inner_rss_groups(ft);
2149 	if (err)
2150 		goto err_free_g;
2151 
2152 	return (0);
2153 
2154 err_free_g:
2155 	kfree(ft->g);
2156 
2157 err_destroy_inner_rss_flow_table:
2158 	mlx5_destroy_flow_table(ft->t);
2159 	ft->t = NULL;
2160 
2161 	return (err);
2162 }
2163 
2164 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2165 {
2166 	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
2167 }
2168 
2169 static void
2170 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2171 {
2172 	mlx5e_destroy_flow_table(&priv->fts.vxlan);
2173 }
2174 
2175 int
2176 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2177 {
2178 	int err;
2179 
2180 	/* setup namespace pointer */
2181 	priv->fts.ns = mlx5_get_flow_namespace(
2182 	    priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2183 
2184 	err = mlx5e_accel_ipsec_fs_rx_tables_create(priv);
2185 	if (err)
2186 		return (err);
2187 
2188 	err = mlx5e_create_vlan_flow_table(priv);
2189 	if (err)
2190 		goto err_destroy_ipsec_flow_table;
2191 
2192 	err = mlx5e_create_vxlan_flow_table(priv);
2193 	if (err)
2194 		goto err_destroy_vlan_flow_table;
2195 
2196 	err = mlx5e_create_main_flow_table(priv, true);
2197 	if (err)
2198 		goto err_destroy_vxlan_flow_table;
2199 
2200 	err = mlx5e_create_inner_rss_flow_table(priv);
2201 	if (err)
2202 		goto err_destroy_main_flow_table_true;
2203 
2204 	err = mlx5e_create_main_flow_table(priv, false);
2205 	if (err)
2206 		goto err_destroy_inner_rss_flow_table;
2207 
2208 	err = mlx5e_add_vxlan_catchall_rule(priv);
2209 	if (err)
2210 		goto err_destroy_main_flow_table_false;
2211 
2212 	err = mlx5e_accel_ipsec_fs_rx_catchall_rules(priv);
2213 	if (err)
2214 		goto err_destroy_vxlan_catchall_rule;
2215 
2216 	err = mlx5e_accel_fs_tcp_create(priv);
2217 	if (err)
2218 		goto err_destroy_ipsec_catchall_rules;
2219 
2220 	return (0);
2221 
2222 err_destroy_ipsec_catchall_rules:
2223 	mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
2224 err_destroy_vxlan_catchall_rule:
2225 	mlx5e_del_vxlan_catchall_rule(priv);
2226 err_destroy_main_flow_table_false:
2227 	mlx5e_destroy_main_flow_table(priv);
2228 err_destroy_inner_rss_flow_table:
2229 	mlx5e_destroy_inner_rss_flow_table(priv);
2230 err_destroy_main_flow_table_true:
2231 	mlx5e_destroy_main_vxlan_flow_table(priv);
2232 err_destroy_vxlan_flow_table:
2233 	mlx5e_destroy_vxlan_flow_table(priv);
2234 err_destroy_vlan_flow_table:
2235 	mlx5e_destroy_vlan_flow_table(priv);
2236 err_destroy_ipsec_flow_table:
2237 	mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
2238 
2239 	return (err);
2240 }
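
/*
 * Tables are created bottom-up: IPsec RX (when active), VLAN, VxLAN,
 * the VxLAN flavour of the main table (the "true" argument), inner
 * RSS, and the regular main table ("false"), followed by the
 * catch-all rules.  The error labels unwind in exactly the reverse
 * order, one label per successfully created object, so a failure at
 * any step leaves no stale tables behind.
 */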
2241 
2242 void
2243 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2244 {
2245 	mlx5e_accel_fs_tcp_destroy(priv);
2246 	mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
2247 	mlx5e_del_vxlan_catchall_rule(priv);
2248 	mlx5e_destroy_main_flow_table(priv);
2249 	mlx5e_destroy_inner_rss_flow_table(priv);
2250 	mlx5e_destroy_main_vxlan_flow_table(priv);
2251 	mlx5e_destroy_vxlan_flow_table(priv);
2252 	mlx5e_destroy_vlan_flow_table(priv);
2253 	mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
2254 }
2255 
2256 int
2257 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2258 {
2259 	int err;
2260 
2261 	err = mlx5e_add_all_vlan_rules(priv);
2262 	if (err)
2263 		return (err);
2264 
2265 	err = mlx5e_add_main_vxlan_rules(priv);
2266 	if (err)
2267 		goto err_del_all_vlan_rules;
2268 
2269 	err = mlx5e_add_all_vxlan_rules(priv);
2270 	if (err)
2271 		goto err_del_main_vxlan_rules;
2272 
2273 	mlx5e_set_rx_mode_core(priv, true);
2274 
2275 	set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2276 
2277 	return (0);
2278 
2279 err_del_main_vxlan_rules:
2280 	mlx5e_del_main_vxlan_rules(priv);
2281 
2282 err_del_all_vlan_rules:
2283 	mlx5e_del_all_vlan_rules(priv);
2284 
2285 	return (err);
2286 }
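
/*
 * MLX5E_STATE_FLOW_RULES_READY is set only after the VLAN, main-VxLAN
 * and per-port VxLAN rules are all installed and the RX mode has been
 * synced; the VxLAN start/stop handlers test this bit before touching
 * rules, so rules are not manipulated while the tables are absent.
 */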
2287 
2288 void
2289 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2290 {
2291 	clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2292 
2293 	mlx5e_set_rx_mode_core(priv, false);
2294 	mlx5e_del_all_vxlan_rules(priv);
2295 	mlx5e_del_main_vxlan_rules(priv);
2296 	mlx5e_del_all_vlan_rules(priv);
2297 }
2298