// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
			va->vid == vb->vid &&
			va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
			ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in the deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
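
/* A minimal, hypothetical usage sketch (not lifted from an in-tree caller):
 * a switch driver about to tear down its ports can flush any pending
 * deferred operations first, so that no deferred item runs after the
 * underlying resources are gone. foo_teardown_ports() is a hypothetical
 * driver helper:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();
 *	foo_teardown_ports(priv);
 *	rtnl_unlock();
 */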

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
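
/* A minimal usage sketch, for illustration only: a caller such as the
 * bridge can ask for an STP state change on a port to be applied from
 * deferred (sleepable) context by setting SWITCHDEV_F_DEFER:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, NULL);
 */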

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
					    enum switchdev_obj_id obj_id,
					    int err, bool add)
{
	const char *action = add ? "add" : "del";
	const char *reason = "";
	const char *problem;
	const char *obj_str;

	switch (obj_id) {
	case SWITCHDEV_OBJ_ID_UNDEFINED:
		obj_str = "Undefined object";
		problem = "Attempted operation is undefined, indicating a possible programming\n"
			  "error.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		obj_str = "VLAN entry";
		problem = "Failure in VLAN settings on this port might disrupt network\n"
			  "segmentation or traffic isolation, affecting network partitioning.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		obj_str = "Port Multicast Database entry";
		problem = "Failure in updating the port's Multicast Database could lead to\n"
			  "multicast forwarding issues.\n";
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		obj_str = "Host Multicast Database entry";
		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
			  "group memberships or traffic delivery, affecting multicast\n"
			  "communication.\n";
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		obj_str = "Media Redundancy Protocol configuration for port";
		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
			  "the specified redundancy ring, resulting in an inability to engage\n"
			  "in MRP-based network operations.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
		obj_str = "MRP Test Frame Operations for port";
		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
			  "assess the ring's operational integrity and fault response, hindering\n"
			  "proactive network management.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		obj_str = "MRP Ring Role Configuration";
		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
			  "disrupting communication for all participants, or isolate the local\n"
			  "system from the ring, hindering its ability to communicate with other\n"
			  "participants.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
		obj_str = "MRP Ring State Configuration";
		problem = "Failure to correctly set the MRP ring state can result in network\n"
			  "loops or leave segments without communication. In a Closed state,\n"
			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
			  "state activates in response to failures, changing port states to\n"
			  "preserve network connectivity.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
		obj_str = "MRP_InTest Frame Generation Configuration";
		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
			  "interconnection ring's state, leading to incorrect blocking or\n"
			  "unblocking of the I/C port. This misconfiguration might result\n"
			  "in unintended network loops or isolate critical network segments,\n"
			  "compromising network integrity and reliability.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
		obj_str = "Interconnection Ring Role Configuration";
		problem = "Incorrect assignment of interconnection ring roles (MIM/MIC) can\n"
			  "impair the formation of the interconnection rings.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
		obj_str = "Interconnection Ring State Configuration";
		problem = "Failure to update the interconnection ring state can, in the Open\n"
			  "state, lead to incorrect blocking or unblocking of the I/C port,\n"
			  "resulting in unintended network loops or isolation of critical\n"
			  "network segments.\n";
		break;
	default:
		obj_str = "Unknown object";
		problem = "Indicating a possible programming error.\n";
	}

	switch (err) {
	case -ENOSPC:
		reason = "Current HW/SW setup lacks sufficient resources.\n";
		break;
	}

	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
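
/* A minimal usage sketch, for illustration only: installing VID 100 as a
 * port's PVID, egress-untagged, on a bridge port device. The embedded
 * struct switchdev_obj is what actually gets passed down:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 100,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */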

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item is pending which is equivalent to
 *	applying action @nt to object @obj.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
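
/* A minimal, hypothetical usage sketch (not lifted from an in-tree caller):
 * before re-adding an object, a caller can check whether an equivalent
 * deferred deletion is still queued, and if so treat the add as pending
 * rather than issuing it again:
 *
 *	if (switchdev_port_obj_act_is_deferred(brport_dev,
 *					       SWITCHDEV_PORT_OBJ_DEL,
 *					       &vlan->obj))
 *		return -EBUSY;
 */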

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
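
/* A minimal sketch of the publisher side, for illustration only: a caller
 * such as the bridge FDB path notifies a learned address on the atomic
 * chain. The fdb_info fields shown exist in struct
 * switchdev_notifier_fdb_info; the surrounding context is assumed:
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *		.added_by_user = true,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, brport_dev,
 *				 &fdb_info.info, NULL);
 */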

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
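
/* A minimal sketch of a consumer, for illustration only: a driver's atomic
 * notifier block can use this helper to locate its own ports. Names with a
 * foo_ prefix are hypothetical driver callbacks:
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			err = switchdev_handle_fdb_event_to_device(dev, event,
 *					ptr, foo_dev_check, foo_foreign_check,
 *					foo_fdb_event);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */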

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again; it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
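
/* A minimal sketch of a consumer, for illustration only: dispatching object
 * add/del events from a driver's blocking notifier block. foo_* names are
 * hypothetical driver callbacks:
 *
 *	static int foo_switchdev_blocking_event(struct notifier_block *nb,
 *						unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_OBJ_DEL:
 *			err = switchdev_handle_port_obj_del(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_del);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */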

/* Same as switchdev_handle_port_obj_add(), except if the object is notified
 * on a @dev that passes @foreign_dev_check_cb, it is replicated towards all
 * devices that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again; it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if the object is notified
 * on a @dev that passes @foreign_dev_check_cb, it is replicated towards all
 * devices that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
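
/* A minimal sketch, for illustration only: the SWITCHDEV_PORT_ATTR_SET case
 * of the same hypothetical blocking notifier sketched above would dispatch
 * through this helper as:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */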

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
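
/* A minimal sketch, for illustration only: a driver typically calls this
 * from its NETDEV_CHANGEUPPER handler when one of its ports joins a bridge.
 * foo_* names are hypothetical:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *	if (err)
 *		return err;
 */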

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);

int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);
1054