xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c (revision 40e79150c1686263e6a031d7702aec63aff31332)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

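/* devlink resource occupancy callback: report how many SPAN agents are
 * currently active. Registered against MLXSW_SP_RESOURCE_SPAN in
 * mlxsw_sp_span_init() below.
 */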
static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

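/* Allocate the SPAN entry array, sized by the MAX_SPAN device resource,
 * and register the occupancy callback with devlink. Entry IDs are later
 * used as the port analyzer IDs (pa_id) when packing MPAT.
 */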
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;
}

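/* Tear down in reverse: cancel the respin work, unregister the devlink
 * occupancy callback and free the entry array. Every bound_ports_list is
 * expected to be empty by now; a leftover binding triggers a warning.
 */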
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span);
}

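/* "phys" entries mirror to a local physical port, so the only parameter
 * to resolve is the destination port itself.
 */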
static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

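/* Resolve the destination MAC for a mirror tunnel: look up (or create) a
 * neighbour entry for the gateway and kick resolution. Only a currently
 * valid, live neighbour yields a usable MAC; otherwise return -ENOENT.
 */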
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

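/* For a VLAN-aware bridge, pick the VID (explicit or the PVID), verify
 * that it is an existing bridge VLAN, and locate the egress port via an
 * FDB lookup. *p_vid is cleared when the egress is untagged.
 */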
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

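/* Resolve the egress device behind a bridge master and refuse the
 * offload unless the chosen bridge port is in the forwarding state.
 */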
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

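/* Pick one usable lower device of a LAG: carrier up, considered tx-able
 * by the LAG, and backed by an mlxsw port.
 */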
static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

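/* Common resolution for tunnel destinations: find the gateway's MAC,
 * then peel VLAN, bridge and LAG uppers until an mlxsw physical port
 * remains. Any step that fails marks the entry unoffloadable
 * (dest_port == NULL) instead of returning an error, so the mirror
 * stays configured, just not offloaded.
 */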
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
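/* Replay the underlay routing decision for the gretap netdevice to find
 * the egress device and the IPv4 gateway the encapsulated packets would
 * use.
 */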
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* Cannot offload if the route has an IPv6 gateway. */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

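/* A gretap device is offloadable only in a narrow configuration: device
 * up, plain GRE (no key, checksum or sequence flags), a fixed TTL, an
 * inherited TOS and a unicast destination. Anything else yields an
 * unoffloadable entry.
 */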
static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

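/* Program the entry as a remote (encapsulating) port analyzer: MPAT
 * carries the VLAN tag, the L2 header and the IPv4 GRE underlay.
 */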
static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
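/* The ip6gretap variant mirrors the IPv4 logic above, using the IPv6 FIB
 * and the neighbour discovery table instead.
 */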
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

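/* VLAN uppers of mlxsw ports can be mirrored to directly; the entry is
 * programmed as a remote Ethernet analyzer carrying the VID.
 */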
static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

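/* Dispatch table of supported mirror targets; the first ops whose
 * ->can_handle() accepts the netdevice wins.
 */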
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

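/* Placeholder ops installed by mlxsw_sp_span_entry_invalidate() below:
 * they keep the entry alive but permanently unoffloadable.
 */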
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

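/* Apply resolved parameters to the hardware. On any failure, including a
 * destination port that belongs to a different mlxsw instance, the entry
 * is kept with dest_port == NULL, i.e. valid but not offloaded.
 */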
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

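/* Claim the first free entry (ref_count == 0), take the initial
 * reference and program it. Returns NULL when all MAX_SPAN entries are
 * in use.
 */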
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

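/* Get-or-create: reuse an existing entry that already mirrors to the
 * same netdevice by bumping its refcount, otherwise allocate a fresh
 * one.
 */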
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

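/* Egress-mirrored ports need an internal (SBIB) buffer large enough to
 * absorb the mirrored traffic; its size depends on the port's speed and
 * MTU. The sizing itself is done by mlxsw_sp_span_buffsize_get().
 */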
static int
mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	/* If the port is egress mirrored, the shared buffer size should be
	 * updated according to the MTU value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(port))
		return mlxsw_sp_span_port_buffsize_update(port, mtu);
	return 0;
}

void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If the port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
		mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
						   mlxsw_sp_port->dev->mtu);
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

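/* Bind a source port to the entry. A given (source port, direction)
 * pair may be bound to at most one analyzer, and egress bindings
 * additionally require an SBIB buffer; the error path unwinds both.
 */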
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span->entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
		if (err)
			return err;
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

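/* Undo mlxsw_sp_span_inspected_port_add(): unbind the port, drop the
 * SBIB buffer for egress SPAN, release the entry reference and free the
 * bookkeeping structure.
 */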
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

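/* Entry point for setting up a mirror session, typically reached from
 * matchall mirred offload. Illustrative call sequence (a sketch only;
 * "from_port" and "to_dev" are made-up caller variables):
 *
 *	int span_id;
 *
 *	err = mlxsw_sp_span_mirror_add(from_port, to_dev,
 *				       MLXSW_SP_SPAN_INGRESS, true,
 *				       &span_id);
 *	...
 *	mlxsw_sp_span_mirror_del(from_port, span_id,
 *				 MLXSW_SP_SPAN_INGRESS, true);
 */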
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms_set(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

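/* Re-resolve the parameters of every active entry after a netdev event
 * may have changed routing or neighbour state, and reprogram any entry
 * whose parameters differ. Runs under RTNL, scheduled through
 * mlxsw_core_schedule_work().
 */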
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		err = curr->ops->parms_set(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
1042