xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (revision 49b49cda41feabe3439f7318e8bf40e3896c7bf4)
1 /*-
2  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "en.h"
29 
30 #include <linux/list.h>
31 #include <dev/mlx5/flow_table.h>
32 
/* Match level used when installing an L2 steering rule. */
enum {
	MLX5E_FULLMATCH = 0,	/* match one complete destination MAC */
	MLX5E_ALLMULTI = 1,	/* match all multicast frames */
	MLX5E_PROMISC = 2,	/* match every frame (promiscuous) */
};

/* Classification of an Ethernet destination address. */
enum {
	MLX5E_UC = 0,		/* unicast */
	MLX5E_MC_IPV4 = 1,	/* IPv4 multicast (01:00:5e:0x:xx:xx) */
	MLX5E_MC_IPV6 = 2,	/* IPv6 multicast (33:33:xx:xx:xx:xx) */
	MLX5E_MC_OTHER = 3,	/* any other multicast */
};

/* Pending action recorded on an address-hash node during a sync. */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};
51 
/* Hash-list node tracking one Ethernet address programmed (or pending). */
struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;		/* MLX5E_ACTION_* pending for this address */
	struct mlx5e_eth_addr_info ai;
};
57 
58 static inline int
59 mlx5e_hash_eth_addr(const u8 * addr)
60 {
61 	return (addr[5]);
62 }
63 
64 static void
65 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
66     const u8 * addr)
67 {
68 	struct mlx5e_eth_addr_hash_node *hn;
69 	int ix = mlx5e_hash_eth_addr(addr);
70 
71 	LIST_FOREACH(hn, &hash[ix], hlist) {
72 		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
73 			if (hn->action == MLX5E_ACTION_DEL)
74 				hn->action = MLX5E_ACTION_NONE;
75 			return;
76 		}
77 	}
78 
79 	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
80 	if (hn == NULL)
81 		return;
82 
83 	ether_addr_copy(hn->ai.addr, addr);
84 	hn->action = MLX5E_ACTION_ADD;
85 
86 	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
87 }
88 
89 static void
90 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
91 {
92 	LIST_REMOVE(hn, hlist);
93 	free(hn, M_MLX5EN);
94 }
95 
96 static void
97 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
98     struct mlx5e_eth_addr_info *ai)
99 {
100 	void *ft = priv->ft.main;
101 
102 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
103 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
104 
105 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
106 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
107 
108 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
109 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
110 
111 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
112 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
113 
114 	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
115 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
116 
117 	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
118 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
119 
120 	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
121 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
122 }
123 
124 static int
125 mlx5e_get_eth_addr_type(const u8 * addr)
126 {
127 	if (ETHER_IS_MULTICAST(addr) == 0)
128 		return (MLX5E_UC);
129 
130 	if ((addr[0] == 0x01) &&
131 	    (addr[1] == 0x00) &&
132 	    (addr[2] == 0x5e) &&
133 	    !(addr[3] & 0x80))
134 		return (MLX5E_MC_IPV4);
135 
136 	if ((addr[0] == 0x33) &&
137 	    (addr[1] == 0x33))
138 		return (MLX5E_MC_IPV6);
139 
140 	return (MLX5E_MC_OTHER);
141 }
142 
143 static	u32
144 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
145 {
146 	int eth_addr_type;
147 	u32 ret;
148 
149 	switch (type) {
150 	case MLX5E_FULLMATCH:
151 		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
152 		switch (eth_addr_type) {
153 		case MLX5E_UC:
154 			ret =
155 			    (1 << MLX5E_TT_IPV4_TCP) |
156 			    (1 << MLX5E_TT_IPV6_TCP) |
157 			    (1 << MLX5E_TT_IPV4_UDP) |
158 			    (1 << MLX5E_TT_IPV6_UDP) |
159 			    (1 << MLX5E_TT_IPV4) |
160 			    (1 << MLX5E_TT_IPV6) |
161 			    (1 << MLX5E_TT_ANY) |
162 			    0;
163 			break;
164 
165 		case MLX5E_MC_IPV4:
166 			ret =
167 			    (1 << MLX5E_TT_IPV4_UDP) |
168 			    (1 << MLX5E_TT_IPV4) |
169 			    0;
170 			break;
171 
172 		case MLX5E_MC_IPV6:
173 			ret =
174 			    (1 << MLX5E_TT_IPV6_UDP) |
175 			    (1 << MLX5E_TT_IPV6) |
176 			    0;
177 			break;
178 
179 		default:
180 			ret =
181 			    (1 << MLX5E_TT_ANY) |
182 			    0;
183 			break;
184 		}
185 		break;
186 
187 	case MLX5E_ALLMULTI:
188 		ret =
189 		    (1 << MLX5E_TT_IPV4_UDP) |
190 		    (1 << MLX5E_TT_IPV6_UDP) |
191 		    (1 << MLX5E_TT_IPV4) |
192 		    (1 << MLX5E_TT_IPV6) |
193 		    (1 << MLX5E_TT_ANY) |
194 		    0;
195 		break;
196 
197 	default:			/* MLX5E_PROMISC */
198 		ret =
199 		    (1 << MLX5E_TT_IPV4_TCP) |
200 		    (1 << MLX5E_TT_IPV6_TCP) |
201 		    (1 << MLX5E_TT_IPV4_UDP) |
202 		    (1 << MLX5E_TT_IPV6_UDP) |
203 		    (1 << MLX5E_TT_IPV4) |
204 		    (1 << MLX5E_TT_IPV6) |
205 		    (1 << MLX5E_TT_ANY) |
206 		    0;
207 		break;
208 	}
209 
210 	return (ret);
211 }
212 
213 static int
214 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
215     struct mlx5e_eth_addr_info *ai, int type,
216     void *flow_context, void *match_criteria)
217 {
218 	u8 match_criteria_enable = 0;
219 	void *match_value;
220 	void *dest;
221 	u8 *dmac;
222 	u8 *match_criteria_dmac;
223 	void *ft = priv->ft.main;
224 	u32 *tirn = priv->tirn;
225 	u32 tt_vec;
226 	int err;
227 
228 	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
229 	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
230 	    outer_headers.dmac_47_16);
231 	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
232 	    outer_headers.dmac_47_16);
233 	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
234 
235 	MLX5_SET(flow_context, flow_context, action,
236 	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
237 	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
238 	MLX5_SET(dest_format_struct, dest, destination_type,
239 	    MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
240 
241 	switch (type) {
242 	case MLX5E_FULLMATCH:
243 		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
244 		memset(match_criteria_dmac, 0xff, ETH_ALEN);
245 		ether_addr_copy(dmac, ai->addr);
246 		break;
247 
248 	case MLX5E_ALLMULTI:
249 		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
250 		match_criteria_dmac[0] = 0x01;
251 		dmac[0] = 0x01;
252 		break;
253 
254 	case MLX5E_PROMISC:
255 		break;
256 	default:
257 		break;
258 	}
259 
260 	tt_vec = mlx5e_get_tt_vec(ai, type);
261 
262 	if (tt_vec & (1 << MLX5E_TT_ANY)) {
263 		MLX5_SET(dest_format_struct, dest, destination_id,
264 		    tirn[MLX5E_TT_ANY]);
265 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
266 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
267 		if (err) {
268 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
269 			return (err);
270 		}
271 		ai->tt_vec |= (1 << MLX5E_TT_ANY);
272 	}
273 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
274 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
275 	    outer_headers.ethertype);
276 
277 	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
278 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
279 		    ETHERTYPE_IP);
280 		MLX5_SET(dest_format_struct, dest, destination_id,
281 		    tirn[MLX5E_TT_IPV4]);
282 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
283 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
284 		if (err) {
285 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
286 			return (err);
287 		}
288 		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
289 	}
290 	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
291 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
292 		    ETHERTYPE_IPV6);
293 		MLX5_SET(dest_format_struct, dest, destination_id,
294 		    tirn[MLX5E_TT_IPV6]);
295 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
296 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
297 		if (err) {
298 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
299 			return (err);
300 		}
301 		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
302 	}
303 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
304 	    outer_headers.ip_protocol);
305 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
306 	    IPPROTO_UDP);
307 
308 	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
309 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
310 		    ETHERTYPE_IP);
311 		MLX5_SET(dest_format_struct, dest, destination_id,
312 		    tirn[MLX5E_TT_IPV4_UDP]);
313 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
314 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
315 		if (err) {
316 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
317 			return (err);
318 		}
319 		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
320 	}
321 	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
322 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
323 		    ETHERTYPE_IPV6);
324 		MLX5_SET(dest_format_struct, dest, destination_id,
325 		    tirn[MLX5E_TT_IPV6_UDP]);
326 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
327 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
328 		if (err) {
329 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
330 			return (err);
331 		}
332 		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
333 	}
334 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
335 	    IPPROTO_TCP);
336 
337 	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
338 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
339 		    ETHERTYPE_IP);
340 		MLX5_SET(dest_format_struct, dest, destination_id,
341 		    tirn[MLX5E_TT_IPV4_TCP]);
342 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
343 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
344 		if (err) {
345 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
346 			return (err);
347 		}
348 		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
349 	}
350 	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
351 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
352 		    ETHERTYPE_IPV6);
353 		MLX5_SET(dest_format_struct, dest, destination_id,
354 		    tirn[MLX5E_TT_IPV6_TCP]);
355 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
356 		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
357 		if (err) {
358 			mlx5e_del_eth_addr_from_flow_table(priv, ai);
359 			return (err);
360 		}
361 		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
362 	}
363 	return (0);
364 }
365 
366 static int
367 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
368     struct mlx5e_eth_addr_info *ai, int type)
369 {
370 	u32 *flow_context;
371 	u32 *match_criteria;
372 	int err;
373 
374 	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
375 	    MLX5_ST_SZ_BYTES(dest_format_struct));
376 	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
377 	if (!flow_context || !match_criteria) {
378 		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
379 		err = -ENOMEM;
380 		goto add_eth_addr_rule_out;
381 	}
382 	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
383 	    match_criteria);
384 	if (err)
385 		if_printf(priv->ifp, "%s: failed\n", __func__);
386 
387 add_eth_addr_rule_out:
388 	kvfree(match_criteria);
389 	kvfree(flow_context);
390 	return (err);
391 }
392 
/* Kinds of steering rules installed in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* frames without a VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any tagged frame */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* one specific VLAN id */
};
398 
/*
 * Install a VLAN steering rule of the given type into the VLAN flow
 * table, forwarding matching frames to the main flow table.  "vid" is
 * only used for MLX5E_VLAN_RULE_TYPE_MATCH_VID.  Returns 0 or a
 * negative errno.
 */
static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
	    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Forward matching frames to the main flow table. */
	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
	    mlx5_get_flow_table_id(priv->ft.main));

	/* Every rule type matches on the VLAN-tag-present bit. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* vlan_tag value stays 0: matches untagged frames only. */
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		break;
	default:			/* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		/* Additionally match the first (outer) VLAN id. */
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
		    outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
		    vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
	    match_criteria, flow_context, ft_ix);
	if (err)
		if_printf(priv->ifp, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return (err);
}
464 
465 static void
466 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
467     enum mlx5e_vlan_rule_type rule_type, u16 vid)
468 {
469 	switch (rule_type) {
470 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
471 		mlx5_del_flow_table_entry(priv->ft.vlan,
472 		    priv->vlan.untagged_rule_ft_ix);
473 		break;
474 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
475 		mlx5_del_flow_table_entry(priv->ft.vlan,
476 		    priv->vlan.any_vlan_rule_ft_ix);
477 		break;
478 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
479 		mlx5_del_flow_table_entry(priv->ft.vlan,
480 		    priv->vlan.active_vlans_ft_ix[vid]);
481 		break;
482 	}
483 }
484 
485 void
486 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
487 {
488 	if (priv->vlan.filter_disabled) {
489 		priv->vlan.filter_disabled = false;
490 		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
491 			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
492 			    0);
493 	}
494 }
495 
496 void
497 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
498 {
499 	if (!priv->vlan.filter_disabled) {
500 		priv->vlan.filter_disabled = true;
501 		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
502 			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
503 			    0);
504 	}
505 }
506 
507 void
508 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
509 {
510 	struct mlx5e_priv *priv = arg;
511 
512 	if (ifp != priv->ifp)
513 		return;
514 
515 	PRIV_LOCK(priv);
516 	set_bit(vid, priv->vlan.active_vlans);
517 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
518 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
519 	PRIV_UNLOCK(priv);
520 }
521 
522 void
523 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
524 {
525 	struct mlx5e_priv *priv = arg;
526 
527 	if (ifp != priv->ifp)
528 		return;
529 
530 	PRIV_LOCK(priv);
531 	clear_bit(vid, priv->vlan.active_vlans);
532 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
533 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
534 	PRIV_UNLOCK(priv);
535 }
536 
537 int
538 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
539 {
540 	u16 vid;
541 	int err;
542 
543 	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
544 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
545 		    vid);
546 		if (err)
547 			return (err);
548 	}
549 
550 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
551 	if (err)
552 		return (err);
553 
554 	if (priv->vlan.filter_disabled) {
555 		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
556 		    0);
557 		if (err)
558 			return (err);
559 	}
560 	return (0);
561 }
562 
563 void
564 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
565 {
566 	u16 vid;
567 
568 	if (priv->vlan.filter_disabled)
569 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
570 
571 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
572 
573 	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
574 	    mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
575 }
576 
/*
 * Iterate (deletion-safe) over every node in every bucket of an
 * address hash table.  "tmp" holds the next node so "hn" may be
 * removed/freed inside the loop body.
 */
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
580 
581 static void
582 mlx5e_execute_action(struct mlx5e_priv *priv,
583     struct mlx5e_eth_addr_hash_node *hn)
584 {
585 	switch (hn->action) {
586 	case MLX5E_ACTION_ADD:
587 		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
588 		hn->action = MLX5E_ACTION_NONE;
589 		break;
590 
591 	case MLX5E_ACTION_DEL:
592 		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
593 		mlx5e_del_eth_addr_from_hash(hn);
594 		break;
595 
596 	default:
597 		break;
598 	}
599 }
600 
/*
 * Walk the interface's link-level unicast and multicast address lists
 * and merge them into the driver's address hashes; new addresses are
 * marked MLX5E_ACTION_ADD and already-known ones have any pending
 * deletion cancelled (see mlx5e_add_eth_addr_to_hash()).
 */
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	/* Link-level unicast addresses. */
	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	/* Link-level multicast addresses. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}
630 
631 static void
632 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
633 {
634 	struct mlx5e_eth_addr_hash_node *hn;
635 	struct mlx5e_eth_addr_hash_node *tmp;
636 	int i;
637 
638 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
639 	    mlx5e_execute_action(priv, hn);
640 
641 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
642 	    mlx5e_execute_action(priv, hn);
643 }
644 
645 static void
646 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
647 {
648 	struct mlx5e_eth_addr_hash_node *hn;
649 	struct mlx5e_eth_addr_hash_node *tmp;
650 	int i;
651 
652 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
653 	    hn->action = MLX5E_ACTION_DEL;
654 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
655 	    hn->action = MLX5E_ACTION_DEL;
656 
657 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
658 		mlx5e_sync_ifp_addr(priv);
659 
660 	mlx5e_apply_ifp_addr(priv);
661 }
662 
/*
 * Bring the receive steering state in line with the interface flags
 * and address lists: promiscuous / allmulti / broadcast rules plus
 * the per-address unicast and multicast tables.  Caller holds the
 * priv lock (see mlx5e_set_rx_mode_work()).
 */
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	/* Desired state, derived from the interface flags. */
	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Transitions relative to the currently programmed state. */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	/* New rules are installed before any old ones are removed. */
	if (enable_promisc)
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

	/* Record the state we just programmed. */
	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;
}
705 
706 void
707 mlx5e_set_rx_mode_work(struct work_struct *work)
708 {
709 	struct mlx5e_priv *priv =
710 	    container_of(work, struct mlx5e_priv, set_rx_mode_work);
711 
712 	PRIV_LOCK(priv);
713 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
714 		mlx5e_set_rx_mode_core(priv);
715 	PRIV_UNLOCK(priv);
716 }
717 
/*
 * Create the main receive flow table.  Nine match groups cover every
 * combination of criteria used by mlx5e_add_eth_addr_rule_sub():
 *   g[0..2]: no DMAC match (promiscuous rules) — ethertype +
 *            ip_protocol, ethertype only, and no criteria;
 *   g[3..5]: full DMAC match (fullmatch rules) — same progression;
 *   g[6..8]: multicast-bit-only DMAC match (allmulti rules).
 * Returns 0 on success or -ENOMEM.
 */
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	/* No DMAC; match ethertype + IP protocol. */
	g[0].log_sz = 2;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ip_protocol);

	/* No DMAC; match ethertype only. */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.ethertype);

	/* Catch-all group: no match criteria. */
	g[2].log_sz = 0;

	/* Full DMAC; match ethertype + IP protocol. */
	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ip_protocol);

	/* Full DMAC; match ethertype only. */
	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
	    outer_headers.ethertype);

	/* Full DMAC; no ethertype/protocol match. */
	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	/* Multicast bit of DMAC; match ethertype + IP protocol. */
	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ip_protocol);

	/* Multicast bit of DMAC; match ethertype only. */
	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
	    outer_headers.ethertype);

	/* Multicast bit of DMAC only. */
	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 9, g);
	free(g, M_MLX5EN);

	return (priv->ft.main ? 0 : -ENOMEM);
}
796 
797 static void
798 mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
799 {
800 	mlx5_destroy_flow_table(priv->ft.main);
801 	priv->ft.main = NULL;
802 }
803 
/*
 * Create the VLAN flow table: one large group matching
 * vlan_tag + first_vid (the per-VID rules) and one small group
 * matching vlan_tag only (untagged and any-VID rules).  Returns 0 on
 * success or -ENOMEM.
 */
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	/* Per-VID rules: match tag presence and the first VLAN id. */
	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 2, g);
	free(g, M_MLX5EN);

	return (priv->ft.vlan ? 0 : -ENOMEM);
}
833 
834 static void
835 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
836 {
837 	mlx5_destroy_flow_table(priv->ft.vlan);
838 	priv->ft.vlan = NULL;
839 }
840 
/*
 * Create the main and VLAN flow tables.  On failure anything already
 * created is destroyed before the error is returned.
 */
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err != 0)
		return (err);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err != 0) {
		mlx5e_destroy_main_flow_table(priv);
		return (err);
	}
	return (0);
}
861 
/* Destroy the flow tables in reverse order of creation. */
void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}
868