xref: /linux/net/switchdev/switchdev.c (revision 4003c9e78778e93188a09d6043a74f7154449d43)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
			va->vid == vb->vid &&
			va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
			ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
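
/* Editorial example (not in the original file): a caller that already holds
 * rtnl_lock can flush the deferred queue synchronously instead of waiting
 * for the work item below, e.g. before tearing down a port so that no queued
 * attr/obj operation races with the teardown. The function name is
 * hypothetical; only switchdev_deferred_process() is real.
 */
static void __maybe_unused my_example_flush_before_teardown(void)
{
	ASSERT_RTNL();

	/* Apply all pending deferred switchdev operations now */
	switchdev_deferred_process();
}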

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	Unless SWITCHDEV_F_DEFER is set in @attr->flags, rtnl_lock must be
 *	held and the caller must not be in atomic context.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
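
/* Editorial example (not in the original file): a minimal sketch of setting
 * a port attribute. With SWITCHDEV_F_DEFER set, the request is queued via
 * switchdev_deferred_enqueue() above and may be issued from atomic context;
 * without it, rtnl_lock must be held. The function name is hypothetical.
 */
static int __maybe_unused my_example_set_stp_state(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
		.flags = SWITCHDEV_F_DEFER,	/* apply asynchronously */
		.u.stp_state = BR_STATE_FORWARDING,
	};

	return switchdev_port_attr_set(dev, &attr, NULL);
}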

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
					    enum switchdev_obj_id obj_id,
					    int err, bool add)
{
	const char *action = add ? "add" : "del";
	const char *reason = "";
	const char *problem;
	const char *obj_str;

	switch (obj_id) {
	case SWITCHDEV_OBJ_ID_UNDEFINED:
		obj_str = "Undefined object";
		problem = "Attempted operation is undefined, indicating a possible programming\n"
			  "error.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		obj_str = "VLAN entry";
		problem = "Failure in VLAN settings on this port might disrupt network\n"
			  "segmentation or traffic isolation, affecting network partitioning.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		obj_str = "Port Multicast Database entry";
		problem = "Failure in updating the port's Multicast Database could lead to\n"
			  "multicast forwarding issues.\n";
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		obj_str = "Host Multicast Database entry";
		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
			  "group memberships or traffic delivery, affecting multicast\n"
			  "communication.\n";
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		obj_str = "Media Redundancy Protocol configuration for port";
		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
			  "the specified redundancy ring, resulting in an inability to engage\n"
			  "in MRP-based network operations.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
		obj_str = "MRP Test Frame Operations for port";
		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
			  "assess the ring's operational integrity and fault response, hindering\n"
			  "proactive network management.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		obj_str = "MRP Ring Role Configuration";
		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
			  "disrupting communication for all participants, or isolate the local\n"
			  "system from the ring, hindering its ability to communicate with other\n"
			  "participants.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
		obj_str = "MRP Ring State Configuration";
		problem = "Failure to correctly set the MRP ring state can result in network\n"
			  "loops or leave segments without communication. In a Closed state,\n"
			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
			  "state activates in response to failures, changing port states to\n"
			  "preserve network connectivity.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
		obj_str = "MRP_InTest Frame Generation Configuration";
		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
			  "interconnection ring's state, leading to incorrect blocking or\n"
			  "unblocking of the I/C port. This misconfiguration might result\n"
			  "in unintended network loops or isolate critical network segments,\n"
			  "compromising network integrity and reliability.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
		obj_str = "Interconnection Ring Role Configuration";
		problem = "Incorrect assignment of interconnection ring roles (MIM/MIC)\n"
			  "can impair the formation of the interconnection rings.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
		obj_str = "Interconnection Ring State Configuration";
		problem = "Failure in updating the interconnection ring state can, in the\n"
			  "Open state, lead to incorrect blocking or unblocking of the\n"
			  "I/C port, resulting in unintended network loops or isolation\n"
			  "of critical network segments.\n";
		break;
	default:
		obj_str = "Unknown object";
		problem = "Indicating a possible programming error.\n";
	}

	switch (err) {
	case -ENOSPC:
		reason = "Current HW/SW setup lacks sufficient resources.\n";
		break;
	}

	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	Unless SWITCHDEV_F_DEFER is set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in atomic context.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
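
/* Editorial example (not in the original file): a minimal sketch of adding
 * a VLAN object to a port. The blocking notifier chain delivers it to the
 * driver that owns @dev; switchdev_port_obj_del() takes the same object to
 * undo it. The function name and VID are hypothetical.
 */
static int __maybe_unused my_example_add_vlan(struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		.vid = 100,
	};

	return switchdev_port_obj_add(dev, &vlan.obj, extack);
}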

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	Unless SWITCHDEV_F_DEFER is set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in atomic context.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item is pending, which is
 *	equivalent to the action @nt on an object @obj.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
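
/* Editorial example (not in the original file): a minimal sketch of testing
 * whether a VLAN deletion is still sitting in the deferred queue. Note that
 * switchdev_obj_eq() also compares the flags and the changed bit, so the
 * object must be populated exactly like the queued one. The function name
 * is hypothetical.
 */
static bool __maybe_unused
my_example_vlan_del_is_pending(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_act_is_deferred(dev, SWITCHDEV_PORT_OBJ_DEL,
						  &vlan.obj);
}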

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
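
/* Editorial example (not in the original file): a minimal sketch of an
 * atomic notifier, as a driver would register with
 * register_switchdev_notifier(&my_example_switchdev_nb). FDB events arrive
 * here in atomic context, so real drivers typically only validate the event
 * and schedule a work item. All "my_example" names are hypothetical.
 */
static int my_example_switchdev_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		netdev_dbg(dev, "FDB event %lu\n", event);
		/* a real driver would queue deferred work here */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block my_example_switchdev_nb __maybe_unused = {
	.notifier_call = my_example_switchdev_event,
};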

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	info->dev = dev;
	info->extack = extack;
	return raw_notifier_call_chain(&switchdev_blocking_notif_chain,
				       val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
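
/* Editorial example (not in the original file): a minimal sketch of a
 * blocking notifier, registered with
 * register_switchdev_blocking_notifier(&my_example_blocking_switchdev_nb).
 * Object and attribute events land here under rtnl_lock and are usually
 * dispatched to the switchdev_handle_*() helpers further down in this file.
 * All "my_example" names are hypothetical.
 */
static int my_example_blocking_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
	case SWITCHDEV_PORT_ATTR_SET:
		/* a real driver would call a switchdev_handle_*() helper */
		err = -EOPNOTSUPP;
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block my_example_blocking_switchdev_nb __maybe_unused = {
	.notifier_call = my_example_blocking_event,
};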

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
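
/* Editorial example (not in the original file): a minimal sketch of wiring
 * the helper above into a driver's atomic notifier. The three callbacks are
 * hypothetical stubs; a real driver would match its own ports, recognize the
 * foreign interfaces it can offload towards, and program the FDB entry.
 */
static bool my_example_dev_check(const struct net_device *dev)
{
	return false;	/* hypothetical: match ports owned by this driver */
}

static bool my_example_foreign_check(const struct net_device *dev,
				     const struct net_device *foreign_dev)
{
	return false;	/* hypothetical: no foreign interfaces recognized */
}

static int my_example_fdb_mod(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	/* hypothetical: program fdb_info->addr / fdb_info->vid in hardware */
	return -EOPNOTSUPP;
}

static int __maybe_unused
my_example_handle_fdb(struct net_device *dev, unsigned long event,
		      const struct switchdev_notifier_fdb_info *fdb_info)
{
	return switchdev_handle_fdb_event_to_device(dev, event, fdb_info,
						    my_example_dev_check,
						    my_example_foreign_check,
						    my_example_fdb_mod);
}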

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
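
/* Editorial example (not in the original file): a minimal sketch of calling
 * the helper above from a blocking notifier's SWITCHDEV_PORT_OBJ_ADD case,
 * reusing the hypothetical my_example_dev_check() stub from the FDB example.
 * The add callback would dispatch on obj->id and program the hardware.
 */
static int my_example_obj_add(struct net_device *dev, const void *ctx,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* hypothetical: program SWITCHDEV_OBJ_PORT_VLAN(obj) */
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int __maybe_unused
my_example_handle_obj_add(struct net_device *dev,
			  struct switchdev_notifier_port_obj_info *port_obj_info)
{
	return switchdev_handle_port_obj_add(dev, port_obj_info,
					     my_example_dev_check,
					     my_example_obj_add);
}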

/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
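
/* Editorial example (not in the original file): a minimal sketch of calling
 * the helper above from a blocking notifier's SWITCHDEV_PORT_ATTR_SET case,
 * again reusing the hypothetical my_example_dev_check() stub.
 */
static int my_example_attr_set(struct net_device *dev, const void *ctx,
			       const struct switchdev_attr *attr,
			       struct netlink_ext_ack *extack)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		/* hypothetical: apply attr->u.stp_state to the port */
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int __maybe_unused
my_example_handle_attr_set(struct net_device *dev,
			   struct switchdev_notifier_port_attr_info *port_attr_info)
{
	return switchdev_handle_port_attr_set(dev, port_attr_info,
					      my_example_dev_check,
					      my_example_attr_set);
}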

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
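
/* Editorial example (not in the original file): a minimal sketch of a driver
 * announcing, e.g. from its NETDEV_CHANGEUPPER handling, that @brport_dev
 * joining a bridge is offloaded, reusing the hypothetical notifier blocks
 * defined in the examples above.
 */
static int __maybe_unused
my_example_bridge_join(struct net_device *brport_dev, struct net_device *dev,
		       struct netlink_ext_ack *extack)
{
	return switchdev_bridge_port_offload(brport_dev, dev, NULL,
					     &my_example_switchdev_nb,
					     &my_example_blocking_switchdev_nb,
					     false, extack);
}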

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);

int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);
1065