xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 2830819497fb2deae3dd71574592ace55f2fbdba)
1 /*-
2  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "en.h"
29 
30 #include <linux/list.h>
31 #include <dev/mlx5/flow_table.h>
32 
/* Match types for steering rules programmed into the main flow table. */
enum {
	MLX5E_FULLMATCH = 0,	/* match the full destination MAC address */
	MLX5E_ALLMULTI = 1,	/* match any multicast destination MAC */
	MLX5E_PROMISC = 2,	/* match all traffic (promiscuous mode) */
};
38 
/* Ethernet destination address classes, see mlx5e_get_eth_addr_type(). */
enum {
	MLX5E_UC = 0,		/* unicast */
	MLX5E_MC_IPV4 = 1,	/* IPv4 multicast (01:00:5e:0x:xx:xx) */
	MLX5E_MC_IPV6 = 2,	/* IPv6 multicast (33:33:xx:xx:xx:xx) */
	MLX5E_MC_OTHER = 3,	/* any other multicast address */
};
45 
/* Pending action recorded for a hashed Ethernet address entry. */
enum {
	MLX5E_ACTION_NONE = 0,	/* entry is in sync with the hardware */
	MLX5E_ACTION_ADD = 1,	/* flow table rules must be added */
	MLX5E_ACTION_DEL = 2,	/* flow table rules must be removed */
};
51 
/* Hash list node tracking one Ethernet address and its pending action. */
struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;	/* hash bucket linkage */
	u8	action;		/* one of the MLX5E_ACTION_* values above */
	struct mlx5e_eth_addr_info ai;	/* address plus flow table state */
};
57 
58 static inline int
59 mlx5e_hash_eth_addr(const u8 * addr)
60 {
61 	return (addr[5]);
62 }
63 
64 static void
65 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
66     const u8 * addr)
67 {
68 	struct mlx5e_eth_addr_hash_node *hn;
69 	int ix = mlx5e_hash_eth_addr(addr);
70 
71 	LIST_FOREACH(hn, &hash[ix], hlist) {
72 		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
73 			if (hn->action == MLX5E_ACTION_DEL)
74 				hn->action = MLX5E_ACTION_NONE;
75 			return;
76 		}
77 	}
78 
79 	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
80 	if (hn == NULL)
81 		return;
82 
83 	ether_addr_copy(hn->ai.addr, addr);
84 	hn->action = MLX5E_ACTION_ADD;
85 
86 	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
87 }
88 
/* Unlink a hash node from its bucket and free it. */
static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}
95 
96 static void
97 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
98     struct mlx5e_eth_addr_info *ai)
99 {
100 	void *ft = priv->ft.main;
101 
102 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
103 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
104 
105 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
106 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
107 
108 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
109 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
110 
111 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
112 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
113 
114 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
115 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
116 
117 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
118 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
119 
120 	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
121 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
122 }
123 
124 static int
125 mlx5e_get_eth_addr_type(const u8 * addr)
126 {
127 	if (ETHER_IS_MULTICAST(addr) == 0)
128 		return (MLX5E_UC);
129 
130 	if ((addr[0] == 0x01) &&
131 	    (addr[1] == 0x00) &&
132 	    (addr[2] == 0x5e) &&
133 	    !(addr[3] & 0x80))
134 		return (MLX5E_MC_IPV4);
135 
136 	if ((addr[0] == 0x33) &&
137 	    (addr[1] == 0x33))
138 		return (MLX5E_MC_IPV6);
139 
140 	return (MLX5E_MC_OTHER);
141 }
142 
143 static	u32
144 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
145 {
146 	int eth_addr_type;
147 	u32 ret;
148 
149 	switch (type) {
150 	case MLX5E_FULLMATCH:
151 		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
152 		switch (eth_addr_type) {
153 		case MLX5E_UC:
154 			ret =
155 			    (1 << MLX5E_TT_IPV4_TCP) |
156 			    (1 << MLX5E_TT_IPV6_TCP) |
157 			    (1 << MLX5E_TT_IPV4_UDP) |
158 			    (1 << MLX5E_TT_IPV6_UDP) |
159 			    (1 << MLX5E_TT_IPV4) |
160 			    (1 << MLX5E_TT_IPV6) |
161 			    (1 << MLX5E_TT_ANY) |
162 			    0;
163 			break;
164 
165 		case MLX5E_MC_IPV4:
166 			ret =
167 			    (1 << MLX5E_TT_IPV4_UDP) |
168 			    (1 << MLX5E_TT_IPV4) |
169 			    0;
170 			break;
171 
172 		case MLX5E_MC_IPV6:
173 			ret =
174 			    (1 << MLX5E_TT_IPV6_UDP) |
175 			    (1 << MLX5E_TT_IPV6) |
176 			    0;
177 			break;
178 
179 		default:
180 			ret =
181 			    (1 << MLX5E_TT_ANY) |
182 			    0;
183 			break;
184 		}
185 		break;
186 
187 	case MLX5E_ALLMULTI:
188 		ret =
189 		    (1 << MLX5E_TT_IPV4_UDP) |
190 		    (1 << MLX5E_TT_IPV6_UDP) |
191 		    (1 << MLX5E_TT_IPV4) |
192 		    (1 << MLX5E_TT_IPV6) |
193 		    (1 << MLX5E_TT_ANY) |
194 		    0;
195 		break;
196 
197 	default:			/* MLX5E_PROMISC */
198 		ret =
199 		    (1 << MLX5E_TT_IPV4_TCP) |
200 		    (1 << MLX5E_TT_IPV6_TCP) |
201 		    (1 << MLX5E_TT_IPV4_UDP) |
202 		    (1 << MLX5E_TT_IPV6_UDP) |
203 		    (1 << MLX5E_TT_IPV4) |
204 		    (1 << MLX5E_TT_IPV6) |
205 		    (1 << MLX5E_TT_ANY) |
206 		    0;
207 		break;
208 	}
209 
210 	return (ret);
211 }
212 
/*
 * Program the flow table entries for one Ethernet address rule.
 *
 * Depending on "type" the entries match the full DMAC
 * (MLX5E_FULLMATCH), only the multicast bit (MLX5E_ALLMULTI) or
 * everything (MLX5E_PROMISC).  One flow table entry is installed per
 * traffic type selected by mlx5e_get_tt_vec(), each forwarding to the
 * corresponding TIR in priv->tirn.
 *
 * "flow_context" and "match_criteria" are caller-supplied zeroed
 * scratch buffers which are rewritten incrementally between the
 * mlx5_add_flow_table_entry() calls below, so the order of the
 * MLX5_SET() operations is significant.
 *
 * On failure all entries installed so far are removed again and the
 * error code is returned; returns zero on success.
 */
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;
	u8 *match_criteria_dmac;
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
	    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
	    outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Every entry forwards to exactly one TIR destination. */
	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		/* Match the complete destination MAC address. */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the DMAC. */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* No match criteria: the rule matches everything. */
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* Catch-all entry for traffic not classified below. */
	if (tt_vec & (1 << MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_ANY);
	}

	/* From here on all entries additionally match the ethertype. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ethertype);

	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
	}

	/* Remaining entries also match on the IP protocol: first UDP. */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_UDP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
	}

	/* Then the TCP entries. */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_TCP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
	}
	return (0);
}
367 
368 static int
369 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
370     struct mlx5e_eth_addr_info *ai, int type)
371 {
372 	u32 *flow_context;
373 	u32 *match_criteria;
374 	int err;
375 
376 	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
377 	    MLX5_ST_SZ_BYTES(dest_format_struct));
378 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
379 	if (!flow_context || !match_criteria) {
380 		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
381 		err = -ENOMEM;
382 		goto add_eth_addr_rule_out;
383 	}
384 
385 	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
386 	    match_criteria);
387 	if (err)
388 		if_printf(priv->ifp, "%s: failed\n", __func__);
389 
390 add_eth_addr_rule_out:
391 	kvfree(match_criteria);
392 	kvfree(flow_context);
393 	return (err);
394 }
395 
/* Kinds of steering rules installed in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* frames without a VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any tagged frame (filter disabled) */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* frames with one specific VID */
};
401 
/*
 * Install one rule into the VLAN flow table.  Matching packets are
 * forwarded to the main flow table.  The resulting flow table entry
 * index is stored in the slot inside priv->vlan selected by
 * "rule_type" and "vid", for later removal by mlx5e_del_vlan_rule().
 *
 * Returns zero on success or a negative error code.
 */
static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
	    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Forward matching packets into the main flow table. */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
		 mlx5_get_flow_table_id(priv->ft.main));

	/* All rule types match on the VLAN tag presence bit. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* vlan_tag match value stays zero: untagged frames only. */
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		/* Tagged frames with any VLAN id. */
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		break;
	default:			/* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		/* Tagged frames carrying exactly "vid". */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
		    outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
		    vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
	    match_criteria, flow_context, ft_ix);
	if (err)
		if_printf(priv->ifp, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return (err);
}
467 
468 static void
469 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
470     enum mlx5e_vlan_rule_type rule_type, u16 vid)
471 {
472 	switch (rule_type) {
473 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
474 		mlx5_del_flow_table_entry(priv->ft.vlan,
475 		    priv->vlan.untagged_rule_ft_ix);
476 		break;
477 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
478 		mlx5_del_flow_table_entry(priv->ft.vlan,
479 		    priv->vlan.any_vlan_rule_ft_ix);
480 		break;
481 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
482 		mlx5_del_flow_table_entry(priv->ft.vlan,
483 		    priv->vlan.active_vlans_ft_ix[vid]);
484 		break;
485 	}
486 }
487 
488 void
489 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
490 {
491 	if (priv->vlan.filter_disabled) {
492 		priv->vlan.filter_disabled = false;
493 		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
494 			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
495 			    0);
496 	}
497 }
498 
499 void
500 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
501 {
502 	if (!priv->vlan.filter_disabled) {
503 		priv->vlan.filter_disabled = true;
504 		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
505 			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
506 			    0);
507 	}
508 }
509 
510 void
511 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
512 {
513 	struct mlx5e_priv *priv = arg;
514 
515 	if (ifp != priv->ifp)
516 		return;
517 
518 	PRIV_LOCK(priv);
519 	set_bit(vid, priv->vlan.active_vlans);
520 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
521 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
522 	PRIV_UNLOCK(priv);
523 }
524 
525 void
526 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
527 {
528 	struct mlx5e_priv *priv = arg;
529 
530 	if (ifp != priv->ifp)
531 		return;
532 
533 	PRIV_LOCK(priv);
534 	clear_bit(vid, priv->vlan.active_vlans);
535 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
536 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
537 	PRIV_UNLOCK(priv);
538 }
539 
540 int
541 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
542 {
543 	u16 vid;
544 	int err;
545 
546 	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
547 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
548 		    vid);
549 		if (err)
550 			return (err);
551 	}
552 
553 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
554 	if (err)
555 		return (err);
556 
557 	if (priv->vlan.filter_disabled) {
558 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
559 		    0);
560 		if (err)
561 			return (err);
562 	}
563 	return (0);
564 }
565 
566 void
567 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
568 {
569 	u16 vid;
570 
571 	if (priv->vlan.filter_disabled)
572 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
573 
574 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
575 
576 	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
577 	    mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
578 }
579 
/*
 * Iterate over all nodes in all buckets of an Ethernet address hash,
 * safely with respect to removal of the current node (uses
 * LIST_FOREACH_SAFE).
 */
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
583 
584 static void
585 mlx5e_execute_action(struct mlx5e_priv *priv,
586     struct mlx5e_eth_addr_hash_node *hn)
587 {
588 	switch (hn->action) {
589 	case MLX5E_ACTION_ADD:
590 		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
591 		hn->action = MLX5E_ACTION_NONE;
592 		break;
593 
594 	case MLX5E_ACTION_DEL:
595 		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
596 		mlx5e_del_eth_addr_from_hash(hn);
597 		break;
598 
599 	default:
600 		break;
601 	}
602 }
603 
/*
 * Harvest all link-level unicast and multicast addresses currently
 * configured on the interface into the driver's address hashes.
 * Already-known addresses are revived, new ones are queued with
 * MLX5E_ACTION_ADD (see mlx5e_add_eth_addr_to_hash()).
 */
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	/* Collect all link-level unicast addresses. */
	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	/* Collect all link-level multicast addresses. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}
633 
/*
 * Execute the pending action of every node in both the unicast and
 * multicast address hashes.
 */
static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}
647 
/*
 * Synchronize the per-address hardware filters with the interface
 * configuration.  All known addresses are first marked for deletion;
 * mlx5e_sync_ifp_addr() then revives the ones still configured (only
 * while the interface is open), and mlx5e_apply_ifp_addr() executes
 * the resulting add/delete actions.
 */
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}
665 
/*
 * Reconcile the hardware receive filters with the current interface
 * flags: the promiscuous, allmulti and broadcast catch-all rules plus
 * the per-address unicast/multicast filters.  New catch-all rules are
 * installed before, and stale ones removed after, the per-address
 * update so traffic is not dropped while the filters change.
 *
 * Caller holds the private lock (see mlx5e_set_rx_mode_work()).
 */
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	/* All rules are dropped while the interface is closed. */
	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Determine which catch-all rules changed state. */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc)
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	/* Synchronize the per-address unicast/multicast filters. */
	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

	/* Remember the new state for the next invocation. */
	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;
}
708 
/*
 * Deferred worker for receive mode updates; takes the private lock
 * and applies the new receive mode only while the interface is open.
 */
void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}
720 
/*
 * Create the main receive flow table with nine match criteria groups,
 * ordered from most to least specific header match:
 *
 *  g[0]-g[2]: no DMAC criteria (promiscuous-type rules) with
 *             ethertype + IP protocol, ethertype only, and match-all.
 *  g[3]-g[5]: full DMAC match (fullmatch rules) with the same three
 *             header combinations; these large groups (log_sz 14/13/11)
 *             hold the per-address rules.
 *  g[6]-g[8]: multicast-bit-only DMAC match (allmulti rules), again
 *             with the three header combinations.
 *
 * Returns zero on success or -ENOMEM when table creation fails.
 */
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	/*
	 * NOTE(review): malloc(9) with M_WAITOK does not return NULL;
	 * the check below is kept as defensive code.
	 */
	g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	/* g[0]: ethertype + IP protocol, no DMAC. */
	g[0].log_sz = 2;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ip_protocol);

	/* g[1]: ethertype only, no DMAC. */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.ethertype);

	/* g[2]: match everything. */
	g[2].log_sz = 0;

	/* g[3]: full DMAC + ethertype + IP protocol. */
	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ip_protocol);

	/* g[4]: full DMAC + ethertype. */
	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
	    outer_headers.ethertype);

	/* g[5]: full DMAC only. */
	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	/* g[6]: multicast bit + ethertype + IP protocol. */
	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ip_protocol);

	/* g[7]: multicast bit + ethertype. */
	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
	    outer_headers.ethertype);

	/* g[8]: multicast bit only. */
	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 9, g);
	free(g, M_MLX5EN);

	return (priv->ft.main ? 0 : -ENOMEM);
}
799 
/* Destroy the main flow table and clear the cached pointer. */
static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
	priv->ft.main = NULL;
}
806 
/*
 * Create the VLAN flow table with two match criteria groups: a large
 * one for the per-VID match rules (vlan_tag + first_vid) and a small
 * one for the untagged and any-VID catch-all rules (vlan_tag only).
 * The rules installed via mlx5e_add_vlan_rule() forward matching
 * packets to the main flow table.
 *
 * Returns zero on success or -ENOMEM when table creation fails.
 */
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	/*
	 * NOTE(review): malloc(9) with M_WAITOK does not return NULL;
	 * the check below is kept as defensive code.
	 */
	g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	/* Per-VID rules: match tag presence and the first VLAN id. */
	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 2, g);
	free(g, M_MLX5EN);

	return (priv->ft.vlan ? 0 : -ENOMEM);
}
836 
/* Destroy the VLAN flow table and clear the cached pointer. */
static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
	priv->ft.vlan = NULL;
}
843 
/*
 * Create the receive flow tables: the main table first, then the VLAN
 * table which forwards into it.  On failure everything created so far
 * is destroyed again.
 *
 * Returns zero on success or a negative error code.
 */
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err != 0)
		return (err);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err != 0) {
		/* Unwind the main table on failure. */
		mlx5e_destroy_main_flow_table(priv);
		return (err);
	}
	return (0);
}
864 
/*
 * Teardown counterpart of mlx5e_open_flow_table(); destroys the flow
 * tables in the reverse order of their creation.
 */
void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}
871