// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

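/* Deferred FDB event, carried from atomic notifier context to the
 * ordered workqueue above.
 */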
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

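/* Only the uc/mc/bc flood flags can be offloaded to hardware */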
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

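/* IPv4/IPv6 multicast is flooded when either the bridge mcast flood
 * flag is set or the port is an mrouter port.
 */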
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port,
					      bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	struct sparx5 *sparx5 = port->sparx5;
	int pgid;

	for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
	     pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}

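/* Apply the offloadable bridge port flags, e.g. as toggled from
 * userspace with "bridge link set dev <port> flood|mcast_flood|bcast_flood on".
 */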
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
					!!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port,
						  !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_BCAST),
					!!(flags.val & BR_BCAST_FLOOD));
}

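/* Map the bridge STP state onto the forwarding and learning port masks;
 * all states other than forwarding and learning block the port.
 */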
static void sparx5_attr_stp_state_set(struct sparx5_port *port, u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

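/* Toggle mrouter behaviour for the port: IP multicast groups and
 * flooding are re-evaluated so an mrouter port receives all IP
 * multicast, not only the groups it joined.
 */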
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if (enable == port->is_mrouter)
		return;

	/* Add/del the mrouter port on all active mdb entries in HW.
	 * Don't change the entry port mask, since that represents
	 * ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

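/* Attach the port to the bridge; only a single hardware bridge is
 * supported, and the first bridged port selects it.
 */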
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove the standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer
	 * need to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	/* Enable uc/mc/bc flooding */
	flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	sparx5_port_attr_bridge_flags(port, flags);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

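/* Detach the port from the bridge and restore standalone (host mode)
 * forwarding towards the CPU.
 */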
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to the CPU */
	sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
			  port->ndev->dev_addr, 0);

	/* The port returns to host mode, so restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);

	/* Disable uc/mc/bc flooding */
	flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = 0;
	sparx5_port_attr_bridge_flags(port, flags);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
				  port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

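/* Deferred FDB add/del handler, running under rtnl on the ordered
 * workqueue. Addresses on non-sparx5 (foreign) netdevs are learned
 * towards the CPU port.
 */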
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	sparx5 = switchdev_work->sparx5;

	rtnl_lock();
	host_addr = !sparx5_netdevice_check(dev);
	if (!host_addr)
		port = netdev_priv(dev);

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev,
					      sparx5_get_pgid(sparx5, PGID_CPU),
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

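/* Atomic switchdev notifier: attribute changes are handled directly,
 * while FDB add/del events are copied and deferred to the ordered
 * workqueue.
 */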
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to the CPU */
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
				  dev->broadcast, v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

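/* Allocate a new MDB entry together with a multicast PGID and track it
 * in the mdb_entries list.
 */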
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}

static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);
			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&sparx5->mdb_lock);
}

static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			break;
		}
	}
	mutex_unlock(&sparx5->mdb_lock);

	return found;
}

static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}

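/* Add a port (or, for host entries, the CPU) to an MDB group, creating
 * the group and its PGID on first use.
 */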
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the VLAN value is not parsed
	 * and vid 0 is received. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		is_new = true;
		if (err)
			return err;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < spx5->data->consts->n_ports; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}

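/* Remove a port (or the CPU copy) from an MDB group and free the group
 * once no ports are left and the CPU no longer needs a copy.
 */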
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* If the port is not an mrouter port, or the address is
		 * L2 mcast, remove the port from the mask.
		 */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		/* Clear the pgid in case mrouter ports exist
		 * that are not part of the group.
		 */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

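/* Register the netdevice and switchdev notifiers and create the ordered
 * workqueue used to defer FDB events.
 */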
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}
789