/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/flow_table.h>

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	struct mlx5e_eth_addr_info ai;
};

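/*
 * Hash an Ethernet address into the address hash table: the least
 * significant byte of the MAC is used directly as the bucket index,
 * so the table is expected to have 256 buckets
 * (MLX5E_ETH_ADDR_HASH_SIZE).
 */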
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

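/*
 * Record an Ethernet address in the hash table. If the address is
 * already present, any pending delete is cancelled so its flow table
 * entries survive the resync; otherwise a new node is queued for
 * addition. An allocation failure is not reported; the address should
 * be picked up again on a later resync pass.
 */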
static void
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    const u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			return;
		}
	}

	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
	if (hn == NULL)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

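/*
 * Remove every flow table entry installed for the given address; the
 * ai->tt_vec bitmask records which traffic types currently have an
 * entry in the main flow table.
 */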
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	void *ft = priv->ft.main;

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}

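/*
 * Classify a destination MAC address: unicast, IPv4 multicast
 * (01:00:5e:00:00:00 - 01:00:5e:7f:ff:ff), IPv6 multicast (33:33:xx),
 * or other multicast.
 */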
static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

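/*
 * Compute the set of traffic types (TT) that need a flow table entry
 * for this address and rule type. Unicast and promiscuous rules cover
 * all traffic types, IPv4/IPv6 multicast addresses only need their
 * own IP version, all-multicast rules skip the TCP types, and other
 * multicast addresses fall back to MLX5E_TT_ANY.
 */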
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

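/*
 * Install one flow table entry per required traffic type for the
 * given address, forwarding matches to the corresponding TIR. The
 * match criteria are narrowed step by step: destination MAC (per the
 * rule type), then ethertype, then IP protocol. On failure all
 * entries added so far are removed and the error is returned.
 */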
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;
	u8 *match_criteria_dmac;
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
	    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
	    outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & (1 << MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_ANY);
	}
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ethertype);

	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
	}
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_UDP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
	}
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_TCP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
	}
	return (0);
}

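/*
 * Allocate the flow context and match criteria buffers and hand them
 * to mlx5e_add_eth_addr_rule_sub(). Both buffers are zeroed, so any
 * match field left untouched is "don't care".
 */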
static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *flow_context;
	u32 *match_criteria;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
	    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
	    match_criteria);
	if (err)
		if_printf(priv->ifp, "%s: failed\n", __func__);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return (err);
}

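/*
 * Push the list of active VLAN IDs into the NIC vport context so the
 * device can filter on them. If more VLANs are active than the device
 * supports (log_max_vlan_list), the list is truncated with a warning.
 */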
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		if_printf(ifp,
		    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
		    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (vlans == NULL)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
		    err);

	kfree(vlans);
	return (err);
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

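/*
 * Add one entry to the VLAN flow table. All three rule types forward
 * matches to the main flow table and differ only in what they match:
 * untagged frames, any VLAN tag, or one specific VLAN ID. Rules for a
 * specific VID also refresh the vport VLAN list.
 */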
static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
	    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
	    mlx5_get_flow_table_id(priv->ft.main));

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.cvlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
		    1);
		break;
	default:			/* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
		    1);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
		    outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
		    vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
	    match_criteria, flow_context, ft_ix);
	if (err)
		if_printf(priv->ifp, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return (err);
}

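/*
 * Remove the VLAN flow table entry installed by mlx5e_add_vlan_rule()
 * for the given rule type and VID.
 */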
static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		mlx5_del_flow_table_entry(priv->ft.vlan,
		    priv->vlan.untagged_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
		    priv->vlan.any_vlan_rule_ft_ix);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5_del_flow_table_entry(priv->ft.vlan,
		    priv->vlan.active_vlans_ft_ix[vid]);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

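/*
 * VLAN filtering is disabled by installing the catch-all "any VLAN ID"
 * rule and re-enabled by removing it; the per-VID rules stay in place
 * either way.
 */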
void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
			    0);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
			    0);
	}
}

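/*
 * Ifnet VLAN registration callbacks: track the active VLAN set under
 * the private lock and only touch the hardware tables while the
 * interface is opened.
 */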
void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	set_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

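/*
 * Install the complete VLAN rule set, e.g. when the flow tables are
 * opened: one rule per active VLAN ID, one for untagged traffic and,
 * if filtering is disabled, the catch-all rule.
 */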
int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	u16 vid;
	int err;

	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
		    vid);
		if (err)
			return (err);
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		return (err);

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
		    0);
		if (err)
			return (err);
	}
	return (0);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	u16 vid;

	if (priv->vlan.filter_disabled)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
	    mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

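/*
 * Iterate over every node in an address hash table; the _SAFE list
 * variant allows the current node to be freed during the walk.
 */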
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

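/*
 * Apply a hash node's pending action: install flow rules for a newly
 * added address, or remove the rules and free the node for a deleted
 * one.
 */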
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

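/*
 * Mirror the ifnet's unicast and multicast address lists into the
 * driver's hash tables. Addresses already present get a pending
 * delete cancelled; new addresses are queued for addition.
 */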
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

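/*
 * Flatten a unicast or multicast hash table into a plain array for
 * the firmware command. The port's own MAC (or the broadcast address,
 * for the multicast list) is pushed first, and the hash walk skips
 * the port's own MAC to avoid a duplicate.
 */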
static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

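/*
 * Program the device's unicast or multicast address list from the
 * corresponding hash table, truncating with a warning when the list
 * exceeds the device limit (log_max_current_uc_list /
 * log_max_current_mc_list).
 */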
static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		if_printf(priv->ifp,
		    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
		    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (addr_array == NULL) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		if_printf(priv->ifp,
		    "Failed to modify vport %s list err(%d)\n",
		    is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

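/*
 * Synchronize the vport context with the driver state: UC and MC
 * address lists plus the all-multicast and promiscuous flags.
 */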
static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
	    ea->allmulti_enabled,
	    ea->promisc_enabled);
}

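/*
 * Execute the pending add/delete actions recorded in both address
 * hash tables.
 */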
static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

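/*
 * Resynchronize the flow tables with the ifnet address lists: mark
 * every known address for deletion, rescan the ifnet (which reverts
 * the mark for addresses still present), then apply what remains.
 */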
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

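/*
 * Recompute the receive filtering state from the ifnet flags. The
 * broad rules (promiscuous, all-multicast, broadcast) are added
 * before the per-address resync and removed only after it, presumably
 * so matching traffic keeps flowing while the tables are rebuilt.
 */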
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc)
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

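/*
 * Deferred-work entry point for receive mode updates; runs the core
 * routine under the private lock while the interface is opened.
 */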
void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

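/*
 * Create the main flow table with nine match groups, ordered from
 * most to least specific: ethertype + IP protocol, ethertype only,
 * and catch-all (groups 0-2, used by promiscuous rules); the same
 * three match sets plus the full destination MAC (groups 3-5, used
 * by full-match rules); and plus only the multicast bit of the MAC
 * (groups 6-8, used by all-multicast rules). Each group's log_sz
 * sizes it for the number of entries it is expected to hold.
 */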
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	g[0].log_sz = 2;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ip_protocol);

	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.ethertype);

	g[2].log_sz = 0;

	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ip_protocol);

	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
	    outer_headers.ethertype);

	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ip_protocol);

	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
	    outer_headers.ethertype);

	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 9, g);
	free(g, M_MLX5EN);

	return (priv->ft.main ? 0 : -ENOMEM);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
	priv->ft.main = NULL;
}

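/*
 * Create the VLAN flow table: group 0 matches one specific VLAN ID
 * (cvlan_tag + first_vid), group 1 matches only the presence or
 * absence of a VLAN tag (the untagged and any-VID rules).
 */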
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	if (g == NULL)
		return (-ENOMEM);

	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.cvlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
	    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
	    0, 2, g);
	free(g, M_MLX5EN);

	return (priv->ft.vlan ? 0 : -ENOMEM);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
	priv->ft.vlan = NULL;
}

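/*
 * Create both flow tables when the interface is opened; on failure,
 * tear down whatever was already created.
 */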
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return (0);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}