1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Routing netlink socket interface: protocol independent part.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  *	Fixes:
12  *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
13  */
14 
15 #include <linux/bitops.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/kernel.h>
21 #include <linux/timer.h>
22 #include <linux/string.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/fcntl.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/capability.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/security.h>
33 #include <linux/mutex.h>
34 #include <linux/if_addr.h>
35 #include <linux/if_bridge.h>
36 #include <linux/if_vlan.h>
37 #include <linux/pci.h>
38 #include <linux/etherdevice.h>
39 #include <linux/bpf.h>
40 
41 #include <linux/uaccess.h>
42 
43 #include <linux/inet.h>
44 #include <linux/netdevice.h>
45 #include <net/ip.h>
46 #include <net/protocol.h>
47 #include <net/arp.h>
48 #include <net/route.h>
49 #include <net/udp.h>
50 #include <net/tcp.h>
51 #include <net/sock.h>
52 #include <net/pkt_sched.h>
53 #include <net/fib_rules.h>
54 #include <net/rtnetlink.h>
55 #include <net/net_namespace.h>
56 #include <net/devlink.h>
57 #if IS_ENABLED(CONFIG_IPV6)
58 #include <net/addrconf.h>
59 #endif
60 #include <linux/dpll.h>
61 
62 #include "dev.h"
63 
64 #define RTNL_MAX_TYPE		50
65 #define RTNL_SLAVE_MAX_TYPE	44
66 
67 struct rtnl_link {
68 	rtnl_doit_func		doit;
69 	rtnl_dumpit_func	dumpit;
70 	struct module		*owner;
71 	unsigned int		flags;
72 	struct rcu_head		rcu;
73 };
74 
75 static DEFINE_MUTEX(rtnl_mutex);
76 
77 void rtnl_lock(void)
78 {
79 	mutex_lock(&rtnl_mutex);
80 }
81 EXPORT_SYMBOL(rtnl_lock);
82 
83 int rtnl_lock_interruptible(void)
84 {
85 	return mutex_lock_interruptible(&rtnl_mutex);
86 }
87 
88 int rtnl_lock_killable(void)
89 {
90 	return mutex_lock_killable(&rtnl_mutex);
91 }
92 
93 static struct sk_buff *defer_kfree_skb_list;
94 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
95 {
96 	if (head && tail) {
97 		tail->next = defer_kfree_skb_list;
98 		defer_kfree_skb_list = head;
99 	}
100 }
101 EXPORT_SYMBOL(rtnl_kfree_skbs);
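
/* A minimal usage sketch (hypothetical caller): code running under the
 * RTNL, e.g. a qdisc reset, hands over a chain of skbs instead of
 * freeing it while holding the lock:
 *
 *	rtnl_lock();
 *	...
 *	rtnl_kfree_skbs(head, tail);
 *	...
 *	rtnl_unlock();
 *
 * The chain sits on defer_kfree_skb_list until __rtnl_unlock() walks
 * and frees it, keeping kfree_skb() latency out of the locked section.
 */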
102 
103 void __rtnl_unlock(void)
104 {
105 	struct sk_buff *head = defer_kfree_skb_list;
106 
107 	defer_kfree_skb_list = NULL;
108 
109 	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
110 	 * is used. In some places, e.g. in cfg80211, we have code that will do
111 	 * something like
112 	 *   rtnl_lock()
113 	 *   wiphy_lock()
114 	 *   ...
115 	 *   rtnl_unlock()
116 	 *
117 	 * and because netdev_run_todo() acquires the RTNL for items on the list
118 	 * we could cause a situation such as this:
119 	 * Thread 1			Thread 2
120 	 *				  rtnl_lock()
121 	 *				  unregister_netdevice()
122 	 *				  __rtnl_unlock()
123 	 * rtnl_lock()
124 	 * wiphy_lock()
125 	 * rtnl_unlock()
126 	 *   netdev_run_todo()
127 	 *     __rtnl_unlock()
128 	 *
129 	 *     // list not empty now
130 	 *     // because of thread 2
131 	 *				  rtnl_lock()
132 	 *     while (!list_empty(...))
133 	 *       rtnl_lock()
134 	 *				  wiphy_lock()
135 	 * **** DEADLOCK ****
136 	 *
137 	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
138 	 * it's not used in cases where something is added to the todo list.
139 	 */
140 	WARN_ON(!list_empty(&net_todo_list));
141 
142 	mutex_unlock(&rtnl_mutex);
143 
144 	while (head) {
145 		struct sk_buff *next = head->next;
146 
147 		kfree_skb(head);
148 		cond_resched();
149 		head = next;
150 	}
151 }
152 
153 void rtnl_unlock(void)
154 {
155 	/* This fellow will unlock it for us. */
156 	netdev_run_todo();
157 }
158 EXPORT_SYMBOL(rtnl_unlock);
159 
160 int rtnl_trylock(void)
161 {
162 	return mutex_trylock(&rtnl_mutex);
163 }
164 EXPORT_SYMBOL(rtnl_trylock);
165 
166 int rtnl_is_locked(void)
167 {
168 	return mutex_is_locked(&rtnl_mutex);
169 }
170 EXPORT_SYMBOL(rtnl_is_locked);
171 
172 bool refcount_dec_and_rtnl_lock(refcount_t *r)
173 {
174 	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
175 }
176 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
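
/* A typical release pattern (the object and its refcnt are
 * hypothetical):
 *
 *	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
 *		...teardown that must run under RTNL...
 *		rtnl_unlock();
 *	}
 *
 * The RTNL is acquired only when this caller dropped the last
 * reference, mirroring refcount_dec_and_mutex_lock().
 */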
177 
178 #ifdef CONFIG_PROVE_LOCKING
179 bool lockdep_rtnl_is_held(void)
180 {
181 	return lockdep_is_held(&rtnl_mutex);
182 }
183 EXPORT_SYMBOL(lockdep_rtnl_is_held);
184 #endif /* #ifdef CONFIG_PROVE_LOCKING */
185 
186 #ifdef CONFIG_DEBUG_NET_SMALL_RTNL
187 void __rtnl_net_lock(struct net *net)
188 {
189 	ASSERT_RTNL();
190 
191 	mutex_lock(&net->rtnl_mutex);
192 }
193 EXPORT_SYMBOL(__rtnl_net_lock);
194 
195 void __rtnl_net_unlock(struct net *net)
196 {
197 	ASSERT_RTNL();
198 
199 	mutex_unlock(&net->rtnl_mutex);
200 }
201 EXPORT_SYMBOL(__rtnl_net_unlock);
202 
203 void rtnl_net_lock(struct net *net)
204 {
205 	rtnl_lock();
206 	__rtnl_net_lock(net);
207 }
208 EXPORT_SYMBOL(rtnl_net_lock);
209 
210 void rtnl_net_unlock(struct net *net)
211 {
212 	__rtnl_net_unlock(net);
213 	rtnl_unlock();
214 }
215 EXPORT_SYMBOL(rtnl_net_unlock);
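
/* Pairing sketch: the per-netns mutex is only ever taken with the
 * global RTNL already held (note the ASSERT_RTNL() in
 * __rtnl_net_lock()), so callers simply bracket their critical
 * section:
 *
 *	rtnl_net_lock(net);
 *	...mutate state of @net...
 *	rtnl_net_unlock(net);
 */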
216 
217 int rtnl_net_trylock(struct net *net)
218 {
219 	int ret = rtnl_trylock();
220 
221 	if (ret)
222 		__rtnl_net_lock(net);
223 
224 	return ret;
225 }
226 EXPORT_SYMBOL(rtnl_net_trylock);
227 
228 int rtnl_net_lock_killable(struct net *net)
229 {
230 	int ret = rtnl_lock_killable();
231 
232 	if (!ret)
233 		__rtnl_net_lock(net);
234 
235 	return ret;
236 }
237 
238 static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
239 {
240 	if (net_eq(net_a, net_b))
241 		return 0;
242 
243 	/* always init_net first */
244 	if (net_eq(net_a, &init_net))
245 		return -1;
246 
247 	if (net_eq(net_b, &init_net))
248 		return 1;
249 
250 	/* otherwise lock in ascending order */
251 	return net_a < net_b ? -1 : 1;
252 }
253 
254 int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b)
255 {
256 	const struct net *net_a, *net_b;
257 
258 	net_a = container_of(a, struct net, rtnl_mutex.dep_map);
259 	net_b = container_of(b, struct net, rtnl_mutex.dep_map);
260 
261 	return rtnl_net_cmp_locks(net_a, net_b);
262 }
263 
264 bool rtnl_net_is_locked(struct net *net)
265 {
266 	return rtnl_is_locked() && mutex_is_locked(&net->rtnl_mutex);
267 }
268 EXPORT_SYMBOL(rtnl_net_is_locked);
269 
270 bool lockdep_rtnl_net_is_held(struct net *net)
271 {
272 	return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex);
273 }
274 EXPORT_SYMBOL(lockdep_rtnl_net_is_held);
275 #else
276 static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
277 {
278 	/* No need to swap */
279 	return -1;
280 }
281 #endif
282 
283 struct rtnl_nets {
284 	/* ->newlink() needs to freeze 3 netns at most;
285 	 * 2 for the new device, 1 for its peer.
286 	 */
287 	struct net *net[3];
288 	unsigned char len;
289 };
290 
291 static void rtnl_nets_init(struct rtnl_nets *rtnl_nets)
292 {
293 	memset(rtnl_nets, 0, sizeof(*rtnl_nets));
294 }
295 
296 static void rtnl_nets_destroy(struct rtnl_nets *rtnl_nets)
297 {
298 	int i;
299 
300 	for (i = 0; i < rtnl_nets->len; i++) {
301 		put_net(rtnl_nets->net[i]);
302 		rtnl_nets->net[i] = NULL;
303 	}
304 
305 	rtnl_nets->len = 0;
306 }
307 
308 /**
309  * rtnl_nets_add - Add netns to be locked before ->newlink().
310  *
311  * @rtnl_nets: rtnl_nets pointer passed to ->get_peer_net().
312  * @net: netns pointer with an extra refcnt held.
313  *
314  * The extra refcnt is released in rtnl_nets_destroy().
315  */
316 static void rtnl_nets_add(struct rtnl_nets *rtnl_nets, struct net *net)
317 {
318 	int i;
319 
320 	DEBUG_NET_WARN_ON_ONCE(rtnl_nets->len == ARRAY_SIZE(rtnl_nets->net));
321 
322 	for (i = 0; i < rtnl_nets->len; i++) {
323 		switch (rtnl_net_cmp_locks(rtnl_nets->net[i], net)) {
324 		case 0:
325 			put_net(net);
326 			return;
327 		case 1:
328 			swap(rtnl_nets->net[i], net);
329 		}
330 	}
331 
332 	rtnl_nets->net[i] = net;
333 	rtnl_nets->len++;
334 }
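
/* Example with hypothetical pointers: adding net2, init_net and then
 * net2 again leaves net[] = { init_net, net2 } and len == 2; the
 * duplicate's extra refcnt is put immediately, and entries stay in
 * rtnl_net_cmp_locks() order so that rtnl_nets_lock() below acquires
 * the per-netns mutexes deadlock-free.
 */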
335 
336 static void rtnl_nets_lock(struct rtnl_nets *rtnl_nets)
337 {
338 	int i;
339 
340 	rtnl_lock();
341 
342 	for (i = 0; i < rtnl_nets->len; i++)
343 		__rtnl_net_lock(rtnl_nets->net[i]);
344 }
345 
346 static void rtnl_nets_unlock(struct rtnl_nets *rtnl_nets)
347 {
348 	int i;
349 
350 	for (i = 0; i < rtnl_nets->len; i++)
351 		__rtnl_net_unlock(rtnl_nets->net[i]);
352 
353 	rtnl_unlock();
354 }
355 
356 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
357 
358 static inline int rtm_msgindex(int msgtype)
359 {
360 	int msgindex = msgtype - RTM_BASE;
361 
362 	/*
363 	 * msgindex < 0 implies someone tried to register a netlink
364 	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
365 	 * the message type has not been added to linux/rtnetlink.h
366 	 */
367 	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
368 
369 	return msgindex;
370 }
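
/* E.g. rtm_msgindex(RTM_NEWLINK) == 0 and rtm_msgindex(RTM_GETLINK) == 2,
 * since RTM_BASE == RTM_NEWLINK == 16 in uapi/linux/rtnetlink.h.
 */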
371 
372 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
373 {
374 	struct rtnl_link __rcu **tab;
375 
376 	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
377 		protocol = PF_UNSPEC;
378 
379 	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
380 	if (!tab)
381 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
382 
383 	return rcu_dereference_rtnl(tab[msgtype]);
384 }
385 
386 static int rtnl_register_internal(struct module *owner,
387 				  int protocol, int msgtype,
388 				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
389 				  unsigned int flags)
390 {
391 	struct rtnl_link *link, *old;
392 	struct rtnl_link __rcu **tab;
393 	int msgindex;
394 	int ret = -ENOBUFS;
395 
396 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
397 	msgindex = rtm_msgindex(msgtype);
398 
399 	rtnl_lock();
400 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
401 	if (tab == NULL) {
402 		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
403 		if (!tab)
404 			goto unlock;
405 
406 		/* ensures we see the 0 stores */
407 		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
408 	}
409 
410 	old = rtnl_dereference(tab[msgindex]);
411 	if (old) {
412 		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
413 		if (!link)
414 			goto unlock;
415 	} else {
416 		link = kzalloc(sizeof(*link), GFP_KERNEL);
417 		if (!link)
418 			goto unlock;
419 	}
420 
421 	WARN_ON(link->owner && link->owner != owner);
422 	link->owner = owner;
423 
424 	WARN_ON(doit && link->doit && link->doit != doit);
425 	if (doit)
426 		link->doit = doit;
427 	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
428 	if (dumpit)
429 		link->dumpit = dumpit;
430 
431 	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
432 		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
433 	link->flags |= flags;
434 
435 	/* publish protocol:msgtype */
436 	rcu_assign_pointer(tab[msgindex], link);
437 	ret = 0;
438 	if (old)
439 		kfree_rcu(old, rcu);
440 unlock:
441 	rtnl_unlock();
442 	return ret;
443 }
444 
445 /**
446  * rtnl_unregister - Unregister a rtnetlink message type
447  * @protocol: Protocol family or PF_UNSPEC
448  * @msgtype: rtnetlink message type
449  *
450  * Returns 0 on success or a negative error code.
451  */
452 static int rtnl_unregister(int protocol, int msgtype)
453 {
454 	struct rtnl_link __rcu **tab;
455 	struct rtnl_link *link;
456 	int msgindex;
457 
458 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
459 	msgindex = rtm_msgindex(msgtype);
460 
461 	rtnl_lock();
462 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
463 	if (!tab) {
464 		rtnl_unlock();
465 		return -ENOENT;
466 	}
467 
468 	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
469 	rtnl_unlock();
470 
471 	kfree_rcu(link, rcu);
472 
473 	return 0;
474 }
475 
476 /**
477  * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
478  * @protocol: Protocol family or PF_UNSPEC
479  *
480  * Identical to calling rtnl_unregister() for all registered message types
481  * of a certain protocol family.
482  */
483 void rtnl_unregister_all(int protocol)
484 {
485 	struct rtnl_link __rcu **tab;
486 	struct rtnl_link *link;
487 	int msgindex;
488 
489 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
490 
491 	rtnl_lock();
492 	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
493 	if (!tab) {
494 		rtnl_unlock();
495 		return;
496 	}
497 	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
498 		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
499 		kfree_rcu(link, rcu);
500 	}
501 	rtnl_unlock();
502 
503 	synchronize_net();
504 
505 	kfree(tab);
506 }
507 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
508 
509 /**
510  * __rtnl_register_many - Register rtnetlink message types
511  * @handlers: Array of struct rtnl_msg_handlers
512  * @n: The length of @handlers
513  *
514  * Registers the specified function pointers (at least one of them has
515  * to be non-NULL) to be called whenever a request message for the
516  * specified protocol family and message type is received.
517  *
518  * The special protocol family PF_UNSPEC may be used to define fallback
519  * function pointers for the case when no entry for the specific protocol
520  * family exists.
521  *
522  * When one element of @handlers fails to register,
523  * 1) built-in: panics.
524  * 2) modules: the previous successful registrations are unwound
525  *              and an error is returned.
526  *
527  * Use rtnl_register_many().
528  */
529 int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
530 {
531 	const struct rtnl_msg_handler *handler;
532 	int i, err = 0;
533 
534 	for (i = 0, handler = handlers; i < n; i++, handler++) {
535 		err = rtnl_register_internal(handler->owner, handler->protocol,
536 					     handler->msgtype, handler->doit,
537 					     handler->dumpit, handler->flags);
538 		if (err) {
539 			if (!handler->owner)
540 				panic("Unable to register rtnetlink message "
541 				      "handlers, %pS\n", handlers);
542 
543 			__rtnl_unregister_many(handlers, i);
544 			break;
545 		}
546 	}
547 
548 	return err;
549 }
550 EXPORT_SYMBOL_GPL(__rtnl_register_many);
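
/* Registration sketch (the foo_* names are hypothetical): callers
 * build a const table and hand it to the rtnl_register_many() wrapper,
 * which supplies ARRAY_SIZE() of the table as @n:
 *
 *	static const struct rtnl_msg_handler foo_rtnl_msg_handlers[] = {
 *		{.owner = THIS_MODULE, .protocol = PF_UNSPEC,
 *		 .msgtype = RTM_GETLINK, .dumpit = foo_dumpit},
 *	};
 *
 *	err = rtnl_register_many(foo_rtnl_msg_handlers);
 */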
551 
552 void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
553 {
554 	const struct rtnl_msg_handler *handler;
555 	int i;
556 
557 	for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
558 		rtnl_unregister(handler->protocol, handler->msgtype);
559 }
560 EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
561 
562 static DEFINE_MUTEX(link_ops_mutex);
563 static LIST_HEAD(link_ops);
564 
565 static struct rtnl_link_ops *rtnl_link_ops_get(const char *kind, int *srcu_index)
566 {
567 	struct rtnl_link_ops *ops;
568 
569 	rcu_read_lock();
570 
571 	list_for_each_entry_rcu(ops, &link_ops, list) {
572 		if (!strcmp(ops->kind, kind)) {
573 			*srcu_index = srcu_read_lock(&ops->srcu);
574 			goto unlock;
575 		}
576 	}
577 
578 	ops = NULL;
579 unlock:
580 	rcu_read_unlock();
581 
582 	return ops;
583 }
584 
585 static void rtnl_link_ops_put(struct rtnl_link_ops *ops, int srcu_index)
586 {
587 	srcu_read_unlock(&ops->srcu, srcu_index);
588 }
589 
590 /**
591  * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
592  * @ops: struct rtnl_link_ops * to register
593  *
594  * Returns 0 on success or a negative error code.
595  */
596 int rtnl_link_register(struct rtnl_link_ops *ops)
597 {
598 	struct rtnl_link_ops *tmp;
599 	int err;
600 
601 	/* Sanity-check max sizes to avoid stack buffer overflow. */
602 	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
603 		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
604 		return -EINVAL;
605 
606 	/* The check for alloc/setup is here because if ops
607 	 * does not have them filled in, it is not possible to
608 	 * use the ops to create a device, so do not fill in a
609 	 * default dellink either; leaving it NULL disables rtnl_dellink.
610 	 */
611 	if ((ops->alloc || ops->setup) && !ops->dellink)
612 		ops->dellink = unregister_netdevice_queue;
613 
614 	err = init_srcu_struct(&ops->srcu);
615 	if (err)
616 		return err;
617 
618 	mutex_lock(&link_ops_mutex);
619 
620 	list_for_each_entry(tmp, &link_ops, list) {
621 		if (!strcmp(ops->kind, tmp->kind)) {
622 			err = -EEXIST;
623 			goto unlock;
624 		}
625 	}
626 
627 	list_add_tail_rcu(&ops->list, &link_ops);
628 unlock:
629 	mutex_unlock(&link_ops_mutex);
630 
631 	return err;
632 }
633 EXPORT_SYMBOL_GPL(rtnl_link_register);
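
/* Minimal registration sketch (the "foo" driver is hypothetical): only
 * .kind plus .setup (or .alloc) are needed for the ops to be usable
 * for device creation; .dellink then defaults to
 * unregister_netdevice_queue() as above:
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 */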
634 
635 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
636 {
637 	struct net_device *dev;
638 	LIST_HEAD(list_kill);
639 
640 	for_each_netdev(net, dev) {
641 		if (dev->rtnl_link_ops == ops)
642 			ops->dellink(dev, &list_kill);
643 	}
644 	unregister_netdevice_many(&list_kill);
645 }
646 
647 /* Return with the rtnl_lock held when there are no network
648  * devices unregistering in any network namespace.
649  */
650 static void rtnl_lock_unregistering_all(void)
651 {
652 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
653 
654 	add_wait_queue(&netdev_unregistering_wq, &wait);
655 	for (;;) {
656 		rtnl_lock();
657 		/* We hold pernet_ops_rwsem write-locked, so parallel
658 		 * setup_net() and cleanup_net() are not possible.
659 		 */
660 		if (!atomic_read(&dev_unreg_count))
661 			break;
662 		__rtnl_unlock();
663 
664 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
665 	}
666 	remove_wait_queue(&netdev_unregistering_wq, &wait);
667 }
668 
669 /**
670  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
671  * @ops: struct rtnl_link_ops * to unregister
672  */
673 void rtnl_link_unregister(struct rtnl_link_ops *ops)
674 {
675 	struct net *net;
676 
677 	mutex_lock(&link_ops_mutex);
678 	list_del_rcu(&ops->list);
679 	mutex_unlock(&link_ops_mutex);
680 
681 	synchronize_srcu(&ops->srcu);
682 	cleanup_srcu_struct(&ops->srcu);
683 
684 	/* Close the race with setup_net() and cleanup_net() */
685 	down_write(&pernet_ops_rwsem);
686 	rtnl_lock_unregistering_all();
687 
688 	for_each_net(net)
689 		__rtnl_kill_links(net, ops);
690 
691 	rtnl_unlock();
692 	up_write(&pernet_ops_rwsem);
693 }
694 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
695 
696 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
697 {
698 	struct net_device *master_dev;
699 	const struct rtnl_link_ops *ops;
700 	size_t size = 0;
701 
702 	rcu_read_lock();
703 
704 	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
705 	if (!master_dev)
706 		goto out;
707 
708 	ops = master_dev->rtnl_link_ops;
709 	if (!ops || !ops->get_slave_size)
710 		goto out;
711 	/* IFLA_INFO_SLAVE_DATA + nested data */
712 	size = nla_total_size(sizeof(struct nlattr)) +
713 	       ops->get_slave_size(master_dev, dev);
714 
715 out:
716 	rcu_read_unlock();
717 	return size;
718 }
719 
720 static size_t rtnl_link_get_size(const struct net_device *dev)
721 {
722 	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
723 	size_t size;
724 
725 	if (!ops)
726 		return 0;
727 
728 	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
729 	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
730 
731 	if (ops->get_size)
732 		/* IFLA_INFO_DATA + nested data */
733 		size += nla_total_size(sizeof(struct nlattr)) +
734 			ops->get_size(dev);
735 
736 	if (ops->get_xstats_size)
737 		/* IFLA_INFO_XSTATS */
738 		size += nla_total_size(ops->get_xstats_size(dev));
739 
740 	size += rtnl_link_get_slave_info_data_size(dev);
741 
742 	return size;
743 }
744 
745 static LIST_HEAD(rtnl_af_ops);
746 
747 static struct rtnl_af_ops *rtnl_af_lookup(const int family, int *srcu_index)
748 {
749 	struct rtnl_af_ops *ops;
750 
751 	ASSERT_RTNL();
752 
753 	rcu_read_lock();
754 
755 	list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
756 		if (ops->family == family) {
757 			*srcu_index = srcu_read_lock(&ops->srcu);
758 			goto unlock;
759 		}
760 	}
761 
762 	ops = NULL;
763 unlock:
764 	rcu_read_unlock();
765 
766 	return ops;
767 }
768 
769 static void rtnl_af_put(struct rtnl_af_ops *ops, int srcu_index)
770 {
771 	srcu_read_unlock(&ops->srcu, srcu_index);
772 }
773 
774 /**
775  * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
776  * @ops: struct rtnl_af_ops * to register
777  *
778  * Return: 0 on success or a negative error code.
779  */
780 int rtnl_af_register(struct rtnl_af_ops *ops)
781 {
782 	int err = init_srcu_struct(&ops->srcu);
783 
784 	if (err)
785 		return err;
786 
787 	rtnl_lock();
788 	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
789 	rtnl_unlock();
790 
791 	return 0;
792 }
793 EXPORT_SYMBOL_GPL(rtnl_af_register);
794 
795 /**
796  * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
797  * @ops: struct rtnl_af_ops * to unregister
798  */
799 void rtnl_af_unregister(struct rtnl_af_ops *ops)
800 {
801 	rtnl_lock();
802 	list_del_rcu(&ops->list);
803 	rtnl_unlock();
804 
805 	synchronize_rcu();
806 	synchronize_srcu(&ops->srcu);
807 	cleanup_srcu_struct(&ops->srcu);
808 }
809 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
810 
811 static size_t rtnl_link_get_af_size(const struct net_device *dev,
812 				    u32 ext_filter_mask)
813 {
814 	struct rtnl_af_ops *af_ops;
815 	size_t size;
816 
817 	/* IFLA_AF_SPEC */
818 	size = nla_total_size(sizeof(struct nlattr));
819 
820 	rcu_read_lock();
821 	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
822 		if (af_ops->get_link_af_size) {
823 			/* AF_* + nested data */
824 			size += nla_total_size(sizeof(struct nlattr)) +
825 				af_ops->get_link_af_size(dev, ext_filter_mask);
826 		}
827 	}
828 	rcu_read_unlock();
829 
830 	return size;
831 }
832 
833 static bool rtnl_have_link_slave_info(const struct net_device *dev)
834 {
835 	struct net_device *master_dev;
836 	bool ret = false;
837 
838 	rcu_read_lock();
839 
840 	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
841 	if (master_dev && master_dev->rtnl_link_ops)
842 		ret = true;
843 	rcu_read_unlock();
844 	return ret;
845 }
846 
847 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
848 				     const struct net_device *dev)
849 {
850 	struct net_device *master_dev;
851 	const struct rtnl_link_ops *ops;
852 	struct nlattr *slave_data;
853 	int err;
854 
855 	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
856 	if (!master_dev)
857 		return 0;
858 	ops = master_dev->rtnl_link_ops;
859 	if (!ops)
860 		return 0;
861 	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
862 		return -EMSGSIZE;
863 	if (ops->fill_slave_info) {
864 		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
865 		if (!slave_data)
866 			return -EMSGSIZE;
867 		err = ops->fill_slave_info(skb, master_dev, dev);
868 		if (err < 0)
869 			goto err_cancel_slave_data;
870 		nla_nest_end(skb, slave_data);
871 	}
872 	return 0;
873 
874 err_cancel_slave_data:
875 	nla_nest_cancel(skb, slave_data);
876 	return err;
877 }
878 
879 static int rtnl_link_info_fill(struct sk_buff *skb,
880 			       const struct net_device *dev)
881 {
882 	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
883 	struct nlattr *data;
884 	int err;
885 
886 	if (!ops)
887 		return 0;
888 	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
889 		return -EMSGSIZE;
890 	if (ops->fill_xstats) {
891 		err = ops->fill_xstats(skb, dev);
892 		if (err < 0)
893 			return err;
894 	}
895 	if (ops->fill_info) {
896 		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
897 		if (data == NULL)
898 			return -EMSGSIZE;
899 		err = ops->fill_info(skb, dev);
900 		if (err < 0)
901 			goto err_cancel_data;
902 		nla_nest_end(skb, data);
903 	}
904 	return 0;
905 
906 err_cancel_data:
907 	nla_nest_cancel(skb, data);
908 	return err;
909 }
910 
911 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
912 {
913 	struct nlattr *linkinfo;
914 	int err = -EMSGSIZE;
915 
916 	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
917 	if (linkinfo == NULL)
918 		goto out;
919 
920 	err = rtnl_link_info_fill(skb, dev);
921 	if (err < 0)
922 		goto err_cancel_link;
923 
924 	err = rtnl_link_slave_info_fill(skb, dev);
925 	if (err < 0)
926 		goto err_cancel_link;
927 
928 	nla_nest_end(skb, linkinfo);
929 	return 0;
930 
931 err_cancel_link:
932 	nla_nest_cancel(skb, linkinfo);
933 out:
934 	return err;
935 }
936 
937 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
938 {
939 	struct sock *rtnl = net->rtnl;
940 
941 	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
942 }
943 
944 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
945 {
946 	struct sock *rtnl = net->rtnl;
947 
948 	return nlmsg_unicast(rtnl, skb, pid);
949 }
950 EXPORT_SYMBOL(rtnl_unicast);
951 
952 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
953 		 const struct nlmsghdr *nlh, gfp_t flags)
954 {
955 	struct sock *rtnl = net->rtnl;
956 
957 	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
958 }
959 EXPORT_SYMBOL(rtnl_notify);
960 
961 void rtnl_set_sk_err(struct net *net, u32 group, int error)
962 {
963 	struct sock *rtnl = net->rtnl;
964 
965 	netlink_set_err(rtnl, 0, group, error);
966 }
967 EXPORT_SYMBOL(rtnl_set_sk_err);
968 
969 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
970 {
971 	struct nlattr *mx;
972 	int i, valid = 0;
973 
974 	/* nothing is dumped for dst_default_metrics, so just skip the loop */
975 	if (metrics == dst_default_metrics.metrics)
976 		return 0;
977 
978 	mx = nla_nest_start_noflag(skb, RTA_METRICS);
979 	if (mx == NULL)
980 		return -ENOBUFS;
981 
982 	for (i = 0; i < RTAX_MAX; i++) {
983 		if (metrics[i]) {
984 			if (i == RTAX_CC_ALGO - 1) {
985 				char tmp[TCP_CA_NAME_MAX], *name;
986 
987 				name = tcp_ca_get_name_by_key(metrics[i], tmp);
988 				if (!name)
989 					continue;
990 				if (nla_put_string(skb, i + 1, name))
991 					goto nla_put_failure;
992 			} else if (i == RTAX_FEATURES - 1) {
993 				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
994 
995 				if (!user_features)
996 					continue;
997 				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
998 				if (nla_put_u32(skb, i + 1, user_features))
999 					goto nla_put_failure;
1000 			} else {
1001 				if (nla_put_u32(skb, i + 1, metrics[i]))
1002 					goto nla_put_failure;
1003 			}
1004 			valid++;
1005 		}
1006 	}
1007 
1008 	if (!valid) {
1009 		nla_nest_cancel(skb, mx);
1010 		return 0;
1011 	}
1012 
1013 	return nla_nest_end(skb, mx);
1014 
1015 nla_put_failure:
1016 	nla_nest_cancel(skb, mx);
1017 	return -EMSGSIZE;
1018 }
1019 EXPORT_SYMBOL(rtnetlink_put_metrics);
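
/* Note the "i + 1" above: the RTAX_* attribute space is 1-based
 * (RTAX_UNSPEC == 0) while metrics[] is 0-based, so e.g.
 * metrics[RTAX_MTU - 1] goes out as nested attribute type RTAX_MTU.
 */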
1020 
1021 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
1022 		       long expires, u32 error)
1023 {
1024 	struct rta_cacheinfo ci = {
1025 		.rta_error = error,
1026 		.rta_id =  id,
1027 	};
1028 
1029 	if (dst) {
1030 		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
1031 		ci.rta_used = dst->__use;
1032 		ci.rta_clntref = rcuref_read(&dst->__rcuref);
1033 	}
1034 	if (expires) {
1035 		unsigned long clock;
1036 
1037 		clock = jiffies_to_clock_t(abs(expires));
1038 		clock = min_t(unsigned long, clock, INT_MAX);
1039 		ci.rta_expires = (expires > 0) ? clock : -clock;
1040 	}
1041 	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
1042 }
1043 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
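
/* Sign convention for @expires (a jiffies delta): positive means the
 * entry expires that far in the future, negative that it expired that
 * long ago; either way the converted value is clamped to INT_MAX
 * clock_t ticks before the sign is reapplied.
 */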
1044 
1045 void netdev_set_operstate(struct net_device *dev, int newstate)
1046 {
1047 	unsigned int old = READ_ONCE(dev->operstate);
1048 
1049 	do {
1050 		if (old == newstate)
1051 			return;
1052 	} while (!try_cmpxchg(&dev->operstate, &old, newstate));
1053 
1054 	netdev_state_change(dev);
1055 }
1056 EXPORT_SYMBOL(netdev_set_operstate);
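
/* The try_cmpxchg() loop above makes the update lockless: the store is
 * retried if a concurrent writer changed dev->operstate in between,
 * and netdev_state_change() is skipped entirely when the state already
 * equals @newstate.
 */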
1057 
1058 static void set_operstate(struct net_device *dev, unsigned char transition)
1059 {
1060 	unsigned char operstate = READ_ONCE(dev->operstate);
1061 
1062 	switch (transition) {
1063 	case IF_OPER_UP:
1064 		if ((operstate == IF_OPER_DORMANT ||
1065 		     operstate == IF_OPER_TESTING ||
1066 		     operstate == IF_OPER_UNKNOWN) &&
1067 		    !netif_dormant(dev) && !netif_testing(dev))
1068 			operstate = IF_OPER_UP;
1069 		break;
1070 
1071 	case IF_OPER_TESTING:
1072 		if (netif_oper_up(dev))
1073 			operstate = IF_OPER_TESTING;
1074 		break;
1075 
1076 	case IF_OPER_DORMANT:
1077 		if (netif_oper_up(dev))
1078 			operstate = IF_OPER_DORMANT;
1079 		break;
1080 	}
1081 
1082 	netdev_set_operstate(dev, operstate);
1083 }
1084 
1085 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
1086 {
1087 	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
1088 	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
1089 }
1090 
1091 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
1092 					   const struct ifinfomsg *ifm)
1093 {
1094 	unsigned int flags = ifm->ifi_flags;
1095 
1096 	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
1097 	if (ifm->ifi_change)
1098 		flags = (flags & ifm->ifi_change) |
1099 			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
1100 
1101 	return flags;
1102 }
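
/* E.g. a request carrying ifi_flags = IFF_UP and ifi_change = IFF_UP
 * only raises IFF_UP and leaves every other device flag untouched,
 * whereas the legacy ifi_change == 0 case applies ifi_flags wholesale.
 */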
1103 
1104 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
1105 				 const struct rtnl_link_stats64 *b)
1106 {
1107 	a->rx_packets = b->rx_packets;
1108 	a->tx_packets = b->tx_packets;
1109 	a->rx_bytes = b->rx_bytes;
1110 	a->tx_bytes = b->tx_bytes;
1111 	a->rx_errors = b->rx_errors;
1112 	a->tx_errors = b->tx_errors;
1113 	a->rx_dropped = b->rx_dropped;
1114 	a->tx_dropped = b->tx_dropped;
1115 
1116 	a->multicast = b->multicast;
1117 	a->collisions = b->collisions;
1118 
1119 	a->rx_length_errors = b->rx_length_errors;
1120 	a->rx_over_errors = b->rx_over_errors;
1121 	a->rx_crc_errors = b->rx_crc_errors;
1122 	a->rx_frame_errors = b->rx_frame_errors;
1123 	a->rx_fifo_errors = b->rx_fifo_errors;
1124 	a->rx_missed_errors = b->rx_missed_errors;
1125 
1126 	a->tx_aborted_errors = b->tx_aborted_errors;
1127 	a->tx_carrier_errors = b->tx_carrier_errors;
1128 	a->tx_fifo_errors = b->tx_fifo_errors;
1129 	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
1130 	a->tx_window_errors = b->tx_window_errors;
1131 
1132 	a->rx_compressed = b->rx_compressed;
1133 	a->tx_compressed = b->tx_compressed;
1134 
1135 	a->rx_nohandler = b->rx_nohandler;
1136 }
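
/* struct rtnl_link_stats carries only 32-bit counters, so the copy
 * above silently truncates the 64-bit values; userspace that needs
 * full width must read IFLA_STATS64 instead of IFLA_STATS.
 */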
1137 
1138 /* All VF info */
1139 static inline int rtnl_vfinfo_size(const struct net_device *dev,
1140 				   u32 ext_filter_mask)
1141 {
1142 	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
1143 		int num_vfs = dev_num_vf(dev->dev.parent);
1144 		size_t size = nla_total_size(0);
1145 		size += num_vfs *
1146 			(nla_total_size(0) +
1147 			 nla_total_size(sizeof(struct ifla_vf_mac)) +
1148 			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
1149 			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
1150 			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
1151 			 nla_total_size(MAX_VLAN_LIST_LEN *
1152 					sizeof(struct ifla_vf_vlan_info)) +
1153 			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
1154 			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
1155 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
1156 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
1157 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
1158 			 nla_total_size(sizeof(struct ifla_vf_trust)));
1159 		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1160 			size += num_vfs *
1161 				(nla_total_size(0) + /* nest IFLA_VF_STATS */
1162 				 /* IFLA_VF_STATS_RX_PACKETS */
1163 				 nla_total_size_64bit(sizeof(__u64)) +
1164 				 /* IFLA_VF_STATS_TX_PACKETS */
1165 				 nla_total_size_64bit(sizeof(__u64)) +
1166 				 /* IFLA_VF_STATS_RX_BYTES */
1167 				 nla_total_size_64bit(sizeof(__u64)) +
1168 				 /* IFLA_VF_STATS_TX_BYTES */
1169 				 nla_total_size_64bit(sizeof(__u64)) +
1170 				 /* IFLA_VF_STATS_BROADCAST */
1171 				 nla_total_size_64bit(sizeof(__u64)) +
1172 				 /* IFLA_VF_STATS_MULTICAST */
1173 				 nla_total_size_64bit(sizeof(__u64)) +
1174 				 /* IFLA_VF_STATS_RX_DROPPED */
1175 				 nla_total_size_64bit(sizeof(__u64)) +
1176 				 /* IFLA_VF_STATS_TX_DROPPED */
1177 				 nla_total_size_64bit(sizeof(__u64)));
1178 		}
1179 		return size;
1180 	} else
1181 		return 0;
1182 }
1183 
1184 static size_t rtnl_port_size(const struct net_device *dev,
1185 			     u32 ext_filter_mask)
1186 {
1187 	size_t port_size = nla_total_size(4)		/* PORT_VF */
1188 		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
1189 		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
1190 		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
1191 		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
1192 		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
1193 	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
1194 	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
1195 		+ port_size;
1196 	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
1197 		+ port_size;
1198 
1199 	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1200 	    !(ext_filter_mask & RTEXT_FILTER_VF))
1201 		return 0;
1202 	if (dev_num_vf(dev->dev.parent))
1203 		return port_self_size + vf_ports_size +
1204 			vf_port_size * dev_num_vf(dev->dev.parent);
1205 	else
1206 		return port_self_size;
1207 }
1208 
1209 static size_t rtnl_xdp_size(void)
1210 {
1211 	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
1212 			  nla_total_size(1) +	/* XDP_ATTACHED */
1213 			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
1214 			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */
1215 
1216 	return xdp_size;
1217 }
1218 
1219 static size_t rtnl_prop_list_size(const struct net_device *dev)
1220 {
1221 	struct netdev_name_node *name_node;
1222 	unsigned int cnt = 0;
1223 
1224 	rcu_read_lock();
1225 	list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
1226 		cnt++;
1227 	rcu_read_unlock();
1228 
1229 	if (!cnt)
1230 		return 0;
1231 
1232 	return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
1233 }
1234 
1235 static size_t rtnl_proto_down_size(const struct net_device *dev)
1236 {
1237 	size_t size = nla_total_size(1);
1238 
1239 	/* Assume dev->proto_down_reason is not zero. */
1240 	size += nla_total_size(0) + nla_total_size(4);
1241 
1242 	return size;
1243 }
1244 
1245 static size_t rtnl_devlink_port_size(const struct net_device *dev)
1246 {
1247 	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1248 
1249 	if (dev->devlink_port)
1250 		size += devlink_nl_port_handle_size(dev->devlink_port);
1251 
1252 	return size;
1253 }
1254 
1255 static size_t rtnl_dpll_pin_size(const struct net_device *dev)
1256 {
1257 	size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
1258 
1259 	size += dpll_netdev_pin_handle_size(dev);
1260 
1261 	return size;
1262 }
1263 
1264 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1265 				     u32 ext_filter_mask)
1266 {
1267 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1268 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1269 	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1270 	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1271 	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1272 	       + nla_total_size(sizeof(struct rtnl_link_stats))
1273 	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1274 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1275 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1276 	       + nla_total_size(4) /* IFLA_TXQLEN */
1277 	       + nla_total_size(4) /* IFLA_WEIGHT */
1278 	       + nla_total_size(4) /* IFLA_MTU */
1279 	       + nla_total_size(4) /* IFLA_LINK */
1280 	       + nla_total_size(4) /* IFLA_MASTER */
1281 	       + nla_total_size(1) /* IFLA_CARRIER */
1282 	       + nla_total_size(4) /* IFLA_PROMISCUITY */
1283 	       + nla_total_size(4) /* IFLA_ALLMULTI */
1284 	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1285 	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1286 	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1287 	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1288 	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1289 	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1290 	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
1291 	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1292 	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1293 	       + nla_total_size(1) /* IFLA_OPERSTATE */
1294 	       + nla_total_size(1) /* IFLA_LINKMODE */
1295 	       + nla_total_size(1) /* IFLA_NETNS_IMMUTABLE */
1296 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1297 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
1298 	       + nla_total_size(4) /* IFLA_GROUP */
1299 	       + nla_total_size(ext_filter_mask
1300 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1301 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1302 	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1303 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1304 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1305 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1306 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1307 	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1308 	       + rtnl_xdp_size() /* IFLA_XDP */
1309 	       + nla_total_size(4)  /* IFLA_EVENT */
1310 	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
1311 	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
1312 	       + rtnl_proto_down_size(dev)  /* proto down */
1313 	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
1314 	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
1315 	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
1316 	       + nla_total_size(4)  /* IFLA_MIN_MTU */
1317 	       + nla_total_size(4)  /* IFLA_MAX_MTU */
1318 	       + rtnl_prop_list_size(dev)
1319 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1320 	       + rtnl_devlink_port_size(dev)
1321 	       + rtnl_dpll_pin_size(dev)
1322 	       + nla_total_size(8)  /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
1323 	       + 0;
1324 }
1325 
1326 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1327 {
1328 	struct nlattr *vf_ports;
1329 	struct nlattr *vf_port;
1330 	int vf;
1331 	int err;
1332 
1333 	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1334 	if (!vf_ports)
1335 		return -EMSGSIZE;
1336 
1337 	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1338 		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1339 		if (!vf_port)
1340 			goto nla_put_failure;
1341 		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1342 			goto nla_put_failure;
1343 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1344 		if (err == -EMSGSIZE)
1345 			goto nla_put_failure;
1346 		if (err) {
1347 			nla_nest_cancel(skb, vf_port);
1348 			continue;
1349 		}
1350 		nla_nest_end(skb, vf_port);
1351 	}
1352 
1353 	nla_nest_end(skb, vf_ports);
1354 
1355 	return 0;
1356 
1357 nla_put_failure:
1358 	nla_nest_cancel(skb, vf_ports);
1359 	return -EMSGSIZE;
1360 }
1361 
1362 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1363 {
1364 	struct nlattr *port_self;
1365 	int err;
1366 
1367 	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1368 	if (!port_self)
1369 		return -EMSGSIZE;
1370 
1371 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1372 	if (err) {
1373 		nla_nest_cancel(skb, port_self);
1374 		return (err == -EMSGSIZE) ? err : 0;
1375 	}
1376 
1377 	nla_nest_end(skb, port_self);
1378 
1379 	return 0;
1380 }
1381 
1382 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1383 			  u32 ext_filter_mask)
1384 {
1385 	int err;
1386 
1387 	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1388 	    !(ext_filter_mask & RTEXT_FILTER_VF))
1389 		return 0;
1390 
1391 	err = rtnl_port_self_fill(skb, dev);
1392 	if (err)
1393 		return err;
1394 
1395 	if (dev_num_vf(dev->dev.parent)) {
1396 		err = rtnl_vf_ports_fill(skb, dev);
1397 		if (err)
1398 			return err;
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1405 {
1406 	int err;
1407 	struct netdev_phys_item_id ppid;
1408 
1409 	err = dev_get_phys_port_id(dev, &ppid);
1410 	if (err) {
1411 		if (err == -EOPNOTSUPP)
1412 			return 0;
1413 		return err;
1414 	}
1415 
1416 	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1417 		return -EMSGSIZE;
1418 
1419 	return 0;
1420 }
1421 
1422 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1423 {
1424 	char name[IFNAMSIZ];
1425 	int err;
1426 
1427 	err = dev_get_phys_port_name(dev, name, sizeof(name));
1428 	if (err) {
1429 		if (err == -EOPNOTSUPP)
1430 			return 0;
1431 		return err;
1432 	}
1433 
1434 	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1435 		return -EMSGSIZE;
1436 
1437 	return 0;
1438 }
1439 
1440 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1441 {
1442 	struct netdev_phys_item_id ppid = { };
1443 	int err;
1444 
1445 	err = dev_get_port_parent_id(dev, &ppid, false);
1446 	if (err) {
1447 		if (err == -EOPNOTSUPP)
1448 			return 0;
1449 		return err;
1450 	}
1451 
1452 	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1453 		return -EMSGSIZE;
1454 
1455 	return 0;
1456 }
1457 
1458 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1459 					      struct net_device *dev)
1460 {
1461 	struct rtnl_link_stats64 *sp;
1462 	struct nlattr *attr;
1463 
1464 	attr = nla_reserve_64bit(skb, IFLA_STATS64,
1465 				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1466 	if (!attr)
1467 		return -EMSGSIZE;
1468 
1469 	sp = nla_data(attr);
1470 	dev_get_stats(dev, sp);
1471 
1472 	attr = nla_reserve(skb, IFLA_STATS,
1473 			   sizeof(struct rtnl_link_stats));
1474 	if (!attr)
1475 		return -EMSGSIZE;
1476 
1477 	copy_rtnl_link_stats(nla_data(attr), sp);
1478 
1479 	return 0;
1480 }
1481 
1482 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1483 					       struct net_device *dev,
1484 					       int vfs_num,
1485 					       u32 ext_filter_mask)
1486 {
1487 	struct ifla_vf_rss_query_en vf_rss_query_en;
1488 	struct nlattr *vf, *vfstats, *vfvlanlist;
1489 	struct ifla_vf_link_state vf_linkstate;
1490 	struct ifla_vf_vlan_info vf_vlan_info;
1491 	struct ifla_vf_spoofchk vf_spoofchk;
1492 	struct ifla_vf_tx_rate vf_tx_rate;
1493 	struct ifla_vf_stats vf_stats;
1494 	struct ifla_vf_trust vf_trust;
1495 	struct ifla_vf_vlan vf_vlan;
1496 	struct ifla_vf_rate vf_rate;
1497 	struct ifla_vf_mac vf_mac;
1498 	struct ifla_vf_broadcast vf_broadcast;
1499 	struct ifla_vf_info ivi;
1500 	struct ifla_vf_guid node_guid;
1501 	struct ifla_vf_guid port_guid;
1502 
1503 	memset(&ivi, 0, sizeof(ivi));
1504 
1505 	/* Not all SR-IOV capable drivers support the
1506 	 * spoofcheck and "RSS query enable" query.  Preset to
1507 	 * -1 so the user space tool can detect that the driver
1508 	 * didn't report anything.
1509 	 */
1510 	ivi.spoofchk = -1;
1511 	ivi.rss_query_en = -1;
1512 	ivi.trusted = -1;
1513 	/* The default value for VF link state is "auto",
1514 	 * IFLA_VF_LINK_STATE_AUTO, which equals zero.
1515 	 */
1516 	ivi.linkstate = 0;
1517 	/* VLAN Protocol by default is 802.1Q */
1518 	ivi.vlan_proto = htons(ETH_P_8021Q);
1519 	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1520 		return 0;
1521 
1522 	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1523 	memset(&node_guid, 0, sizeof(node_guid));
1524 	memset(&port_guid, 0, sizeof(port_guid));
1525 
1526 	vf_mac.vf =
1527 		vf_vlan.vf =
1528 		vf_vlan_info.vf =
1529 		vf_rate.vf =
1530 		vf_tx_rate.vf =
1531 		vf_spoofchk.vf =
1532 		vf_linkstate.vf =
1533 		vf_rss_query_en.vf =
1534 		vf_trust.vf =
1535 		node_guid.vf =
1536 		port_guid.vf = ivi.vf;
1537 
1538 	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1539 	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1540 	vf_vlan.vlan = ivi.vlan;
1541 	vf_vlan.qos = ivi.qos;
1542 	vf_vlan_info.vlan = ivi.vlan;
1543 	vf_vlan_info.qos = ivi.qos;
1544 	vf_vlan_info.vlan_proto = ivi.vlan_proto;
1545 	vf_tx_rate.rate = ivi.max_tx_rate;
1546 	vf_rate.min_tx_rate = ivi.min_tx_rate;
1547 	vf_rate.max_tx_rate = ivi.max_tx_rate;
1548 	vf_spoofchk.setting = ivi.spoofchk;
1549 	vf_linkstate.link_state = ivi.linkstate;
1550 	vf_rss_query_en.setting = ivi.rss_query_en;
1551 	vf_trust.setting = ivi.trusted;
1552 	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1553 	if (!vf)
1554 		return -EMSGSIZE;
1555 	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1556 	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1557 	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1558 	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1559 		    &vf_rate) ||
1560 	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1561 		    &vf_tx_rate) ||
1562 	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1563 		    &vf_spoofchk) ||
1564 	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1565 		    &vf_linkstate) ||
1566 	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1567 		    sizeof(vf_rss_query_en),
1568 		    &vf_rss_query_en) ||
1569 	    nla_put(skb, IFLA_VF_TRUST,
1570 		    sizeof(vf_trust), &vf_trust))
1571 		goto nla_put_vf_failure;
1572 
1573 	if (dev->netdev_ops->ndo_get_vf_guid &&
1574 	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1575 					      &port_guid)) {
1576 		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1577 			    &node_guid) ||
1578 		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1579 			    &port_guid))
1580 			goto nla_put_vf_failure;
1581 	}
1582 	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1583 	if (!vfvlanlist)
1584 		goto nla_put_vf_failure;
1585 	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1586 		    &vf_vlan_info)) {
1587 		nla_nest_cancel(skb, vfvlanlist);
1588 		goto nla_put_vf_failure;
1589 	}
1590 	nla_nest_end(skb, vfvlanlist);
1591 	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1592 		memset(&vf_stats, 0, sizeof(vf_stats));
1593 		if (dev->netdev_ops->ndo_get_vf_stats)
1594 			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1595 							  &vf_stats);
1596 		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1597 		if (!vfstats)
1598 			goto nla_put_vf_failure;
1599 		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1600 				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1601 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1602 				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1603 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1604 				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1605 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1606 				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1607 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1608 				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1609 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1610 				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1611 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1612 				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1613 		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1614 				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1615 			nla_nest_cancel(skb, vfstats);
1616 			goto nla_put_vf_failure;
1617 		}
1618 		nla_nest_end(skb, vfstats);
1619 	}
1620 	nla_nest_end(skb, vf);
1621 	return 0;
1622 
1623 nla_put_vf_failure:
1624 	nla_nest_cancel(skb, vf);
1625 	return -EMSGSIZE;
1626 }
1627 
1628 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1629 					   struct net_device *dev,
1630 					   u32 ext_filter_mask)
1631 {
1632 	struct nlattr *vfinfo;
1633 	int i, num_vfs;
1634 
1635 	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1636 		return 0;
1637 
1638 	num_vfs = dev_num_vf(dev->dev.parent);
1639 	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1640 		return -EMSGSIZE;
1641 
1642 	if (!dev->netdev_ops->ndo_get_vf_config)
1643 		return 0;
1644 
1645 	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1646 	if (!vfinfo)
1647 		return -EMSGSIZE;
1648 
1649 	for (i = 0; i < num_vfs; i++) {
1650 		if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1651 			nla_nest_cancel(skb, vfinfo);
1652 			return -EMSGSIZE;
1653 		}
1654 	}
1655 
1656 	nla_nest_end(skb, vfinfo);
1657 	return 0;
1658 }
1659 
1660 static int rtnl_fill_link_ifmap(struct sk_buff *skb,
1661 				const struct net_device *dev)
1662 {
1663 	struct rtnl_link_ifmap map;
1664 
1665 	memset(&map, 0, sizeof(map));
1666 	map.mem_start = READ_ONCE(dev->mem_start);
1667 	map.mem_end   = READ_ONCE(dev->mem_end);
1668 	map.base_addr = READ_ONCE(dev->base_addr);
1669 	map.irq       = READ_ONCE(dev->irq);
1670 	map.dma       = READ_ONCE(dev->dma);
1671 	map.port      = READ_ONCE(dev->if_port);
1672 
1673 	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1674 		return -EMSGSIZE;
1675 
1676 	return 0;
1677 }
1678 
1679 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1680 {
1681 	const struct bpf_prog *generic_xdp_prog;
1682 	u32 res = 0;
1683 
1684 	rcu_read_lock();
1685 	generic_xdp_prog = rcu_dereference(dev->xdp_prog);
1686 	if (generic_xdp_prog)
1687 		res = generic_xdp_prog->aux->id;
1688 	rcu_read_unlock();
1689 
1690 	return res;
1691 }
1692 
1693 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1694 {
1695 	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1696 }
1697 
1698 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1699 {
1700 	return dev_xdp_prog_id(dev, XDP_MODE_HW);
1701 }
1702 
1703 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1704 			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1705 			       u32 (*get_prog_id)(struct net_device *dev))
1706 {
1707 	u32 curr_id;
1708 	int err;
1709 
1710 	curr_id = get_prog_id(dev);
1711 	if (!curr_id)
1712 		return 0;
1713 
1714 	*prog_id = curr_id;
1715 	err = nla_put_u32(skb, attr, curr_id);
1716 	if (err)
1717 		return err;
1718 
1719 	if (*mode != XDP_ATTACHED_NONE)
1720 		*mode = XDP_ATTACHED_MULTI;
1721 	else
1722 		*mode = tgt_mode;
1723 
1724 	return 0;
1725 }
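
/* E.g. with both a generic (skb-mode) and a driver-mode program
 * attached, two successful calls leave *mode == XDP_ATTACHED_MULTI and
 * both per-mode *_PROG_ID attributes filled in; rtnl_xdp_fill() below
 * then omits the single IFLA_XDP_PROG_ID, as no one program is
 * authoritative.
 */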
1726 
1727 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1728 {
1729 	struct nlattr *xdp;
1730 	u32 prog_id;
1731 	int err;
1732 	u8 mode;
1733 
1734 	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1735 	if (!xdp)
1736 		return -EMSGSIZE;
1737 
1738 	prog_id = 0;
1739 	mode = XDP_ATTACHED_NONE;
1740 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1741 				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1742 	if (err)
1743 		goto err_cancel;
1744 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1745 				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1746 	if (err)
1747 		goto err_cancel;
1748 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1749 				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1750 	if (err)
1751 		goto err_cancel;
1752 
1753 	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1754 	if (err)
1755 		goto err_cancel;
1756 
1757 	if (prog_id && mode != XDP_ATTACHED_MULTI) {
1758 		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1759 		if (err)
1760 			goto err_cancel;
1761 	}
1762 
1763 	nla_nest_end(skb, xdp);
1764 	return 0;
1765 
1766 err_cancel:
1767 	nla_nest_cancel(skb, xdp);
1768 	return err;
1769 }
1770 
1771 static u32 rtnl_get_event(unsigned long event)
1772 {
1773 	u32 rtnl_event_type = IFLA_EVENT_NONE;
1774 
1775 	switch (event) {
1776 	case NETDEV_REBOOT:
1777 		rtnl_event_type = IFLA_EVENT_REBOOT;
1778 		break;
1779 	case NETDEV_FEAT_CHANGE:
1780 		rtnl_event_type = IFLA_EVENT_FEATURES;
1781 		break;
1782 	case NETDEV_BONDING_FAILOVER:
1783 		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1784 		break;
1785 	case NETDEV_NOTIFY_PEERS:
1786 		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1787 		break;
1788 	case NETDEV_RESEND_IGMP:
1789 		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1790 		break;
1791 	case NETDEV_CHANGEINFODATA:
1792 		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1793 		break;
1794 	default:
1795 		break;
1796 	}
1797 
1798 	return rtnl_event_type;
1799 }
1800 
1801 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1802 {
1803 	const struct net_device *upper_dev;
1804 	int ret = 0;
1805 
1806 	rcu_read_lock();
1807 
1808 	upper_dev = netdev_master_upper_dev_get_rcu(dev);
1809 	if (upper_dev)
1810 		ret = nla_put_u32(skb, IFLA_MASTER,
1811 				  READ_ONCE(upper_dev->ifindex));
1812 
1813 	rcu_read_unlock();
1814 	return ret;
1815 }
1816 
1817 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1818 			  bool force)
1819 {
1820 	int iflink = dev_get_iflink(dev);
1821 
1822 	if (force || READ_ONCE(dev->ifindex) != iflink)
1823 		return nla_put_u32(skb, IFLA_LINK, iflink);
1824 
1825 	return 0;
1826 }
1827 
1828 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1829 					      struct net_device *dev)
1830 {
1831 	char buf[IFALIASZ];
1832 	int ret;
1833 
1834 	ret = dev_get_alias(dev, buf, sizeof(buf));
1835 	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1836 }
1837 
1838 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1839 				  const struct net_device *dev,
1840 				  struct net *src_net, gfp_t gfp)
1841 {
1842 	bool put_iflink = false;
1843 
1844 	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1845 		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1846 
1847 		if (!net_eq(dev_net(dev), link_net)) {
1848 			int id = peernet2id_alloc(src_net, link_net, gfp);
1849 
1850 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1851 				return -EMSGSIZE;
1852 
1853 			put_iflink = true;
1854 		}
1855 	}
1856 
1857 	return nla_put_iflink(skb, dev, put_iflink);
1858 }
1859 
1860 static int rtnl_fill_link_af(struct sk_buff *skb,
1861 			     const struct net_device *dev,
1862 			     u32 ext_filter_mask)
1863 {
1864 	const struct rtnl_af_ops *af_ops;
1865 	struct nlattr *af_spec;
1866 
1867 	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1868 	if (!af_spec)
1869 		return -EMSGSIZE;
1870 
1871 	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1872 		struct nlattr *af;
1873 		int err;
1874 
1875 		if (!af_ops->fill_link_af)
1876 			continue;
1877 
1878 		af = nla_nest_start_noflag(skb, af_ops->family);
1879 		if (!af)
1880 			return -EMSGSIZE;
1881 
1882 		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1883 		/*
1884 		 * The fill_link_af() callback may return -ENODATA to
1885 		 * indicate that there was no data to be dumped. This
1886 		 * is not an error; it means we should trim the
1887 		 * attribute header and continue.
1888 		 */
1889 		if (err == -ENODATA)
1890 			nla_nest_cancel(skb, af);
1891 		else if (err < 0)
1892 			return -EMSGSIZE;
1893 
1894 		nla_nest_end(skb, af);
1895 	}
1896 
1897 	nla_nest_end(skb, af_spec);
1898 	return 0;
1899 }
1900 
1901 static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1902 				 const struct net_device *dev)
1903 {
1904 	struct netdev_name_node *name_node;
1905 	int count = 0;
1906 
1907 	list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
1908 		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1909 			return -EMSGSIZE;
1910 		count++;
1911 	}
1912 	return count;
1913 }
1914 
1915 /* RCU protected. */
1916 static int rtnl_fill_prop_list(struct sk_buff *skb,
1917 			       const struct net_device *dev)
1918 {
1919 	struct nlattr *prop_list;
1920 	int ret;
1921 
1922 	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1923 	if (!prop_list)
1924 		return -EMSGSIZE;
1925 
1926 	ret = rtnl_fill_alt_ifnames(skb, dev);
1927 	if (ret <= 0)
1928 		goto nest_cancel;
1929 
1930 	nla_nest_end(skb, prop_list);
1931 	return 0;
1932 
1933 nest_cancel:
1934 	nla_nest_cancel(skb, prop_list);
1935 	return ret;
1936 }
1937 
1938 static int rtnl_fill_proto_down(struct sk_buff *skb,
1939 				const struct net_device *dev)
1940 {
1941 	struct nlattr *pr;
1942 	u32 preason;
1943 
1944 	if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
1945 		goto nla_put_failure;
1946 
1947 	preason = READ_ONCE(dev->proto_down_reason);
1948 	if (!preason)
1949 		return 0;
1950 
1951 	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1952 	if (!pr)
1953 		return -EMSGSIZE;
1954 
1955 	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1956 		nla_nest_cancel(skb, pr);
1957 		goto nla_put_failure;
1958 	}
1959 
1960 	nla_nest_end(skb, pr);
1961 	return 0;
1962 
1963 nla_put_failure:
1964 	return -EMSGSIZE;
1965 }
1966 
1967 static int rtnl_fill_devlink_port(struct sk_buff *skb,
1968 				  const struct net_device *dev)
1969 {
1970 	struct nlattr *devlink_port_nest;
1971 	int ret;
1972 
1973 	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1974 	if (!devlink_port_nest)
1975 		return -EMSGSIZE;
1976 
1977 	if (dev->devlink_port) {
1978 		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1979 		if (ret < 0)
1980 			goto nest_cancel;
1981 	}
1982 
1983 	nla_nest_end(skb, devlink_port_nest);
1984 	return 0;
1985 
1986 nest_cancel:
1987 	nla_nest_cancel(skb, devlink_port_nest);
1988 	return ret;
1989 }
1990 
1991 static int rtnl_fill_dpll_pin(struct sk_buff *skb,
1992 			      const struct net_device *dev)
1993 {
1994 	struct nlattr *dpll_pin_nest;
1995 	int ret;
1996 
1997 	dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
1998 	if (!dpll_pin_nest)
1999 		return -EMSGSIZE;
2000 
2001 	ret = dpll_netdev_add_pin_handle(skb, dev);
2002 	if (ret < 0)
2003 		goto nest_cancel;
2004 
2005 	nla_nest_end(skb, dpll_pin_nest);
2006 	return 0;
2007 
2008 nest_cancel:
2009 	nla_nest_cancel(skb, dpll_pin_nest);
2010 	return ret;
2011 }
2012 
2013 static int rtnl_fill_ifinfo(struct sk_buff *skb,
2014 			    struct net_device *dev, struct net *src_net,
2015 			    int type, u32 pid, u32 seq, u32 change,
2016 			    unsigned int flags, u32 ext_filter_mask,
2017 			    u32 event, int *new_nsid, int new_ifindex,
2018 			    int tgt_netnsid, gfp_t gfp)
2019 {
2020 	char devname[IFNAMSIZ];
2021 	struct ifinfomsg *ifm;
2022 	struct nlmsghdr *nlh;
2023 	struct Qdisc *qdisc;
2024 
2025 	ASSERT_RTNL();
2026 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
2027 	if (nlh == NULL)
2028 		return -EMSGSIZE;
2029 
2030 	ifm = nlmsg_data(nlh);
2031 	ifm->ifi_family = AF_UNSPEC;
2032 	ifm->__ifi_pad = 0;
2033 	ifm->ifi_type = READ_ONCE(dev->type);
2034 	ifm->ifi_index = READ_ONCE(dev->ifindex);
2035 	ifm->ifi_flags = dev_get_flags(dev);
2036 	ifm->ifi_change = change;
2037 
2038 	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
2039 		goto nla_put_failure;
2040 
2041 	netdev_copy_name(dev, devname);
2042 	if (nla_put_string(skb, IFLA_IFNAME, devname))
2043 		goto nla_put_failure;
2044 
2045 	if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
2046 	    nla_put_u8(skb, IFLA_OPERSTATE,
2047 		       netif_running(dev) ? READ_ONCE(dev->operstate) :
2048 					    IF_OPER_DOWN) ||
2049 	    nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
2050 	    nla_put_u8(skb, IFLA_NETNS_IMMUTABLE, dev->netns_immutable) ||
2051 	    nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
2052 	    nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
2053 	    nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
2054 	    nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
2055 	    nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
2056 	    nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
2057 	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
2058 			READ_ONCE(dev->num_tx_queues)) ||
2059 	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
2060 			READ_ONCE(dev->gso_max_segs)) ||
2061 	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
2062 			READ_ONCE(dev->gso_max_size)) ||
2063 	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
2064 			READ_ONCE(dev->gro_max_size)) ||
2065 	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
2066 			READ_ONCE(dev->gso_ipv4_max_size)) ||
2067 	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
2068 			READ_ONCE(dev->gro_ipv4_max_size)) ||
2069 	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
2070 			READ_ONCE(dev->tso_max_size)) ||
2071 	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
2072 			READ_ONCE(dev->tso_max_segs)) ||
2073 	    nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON,
2074 			 READ_ONCE(dev->max_pacing_offload_horizon)) ||
2075 #ifdef CONFIG_RPS
2076 	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
2077 			READ_ONCE(dev->num_rx_queues)) ||
2078 #endif
2079 	    put_master_ifindex(skb, dev) ||
2080 	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
2081 	    nla_put_ifalias(skb, dev) ||
2082 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
2083 			atomic_read(&dev->carrier_up_count) +
2084 			atomic_read(&dev->carrier_down_count)) ||
2085 	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
2086 			atomic_read(&dev->carrier_up_count)) ||
2087 	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
2088 			atomic_read(&dev->carrier_down_count)))
2089 		goto nla_put_failure;
2090 
2091 	if (rtnl_fill_proto_down(skb, dev))
2092 		goto nla_put_failure;
2093 
2094 	if (event != IFLA_EVENT_NONE) {
2095 		if (nla_put_u32(skb, IFLA_EVENT, event))
2096 			goto nla_put_failure;
2097 	}
2098 
2099 	if (dev->addr_len) {
2100 		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
2101 		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
2102 			goto nla_put_failure;
2103 	}
2104 
2105 	if (rtnl_phys_port_id_fill(skb, dev))
2106 		goto nla_put_failure;
2107 
2108 	if (rtnl_phys_port_name_fill(skb, dev))
2109 		goto nla_put_failure;
2110 
2111 	if (rtnl_phys_switch_id_fill(skb, dev))
2112 		goto nla_put_failure;
2113 
2114 	if (rtnl_fill_stats(skb, dev))
2115 		goto nla_put_failure;
2116 
2117 	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
2118 		goto nla_put_failure;
2119 
2120 	if (rtnl_port_fill(skb, dev, ext_filter_mask))
2121 		goto nla_put_failure;
2122 
2123 	if (rtnl_xdp_fill(skb, dev))
2124 		goto nla_put_failure;
2125 
2126 	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
2127 		if (rtnl_link_fill(skb, dev) < 0)
2128 			goto nla_put_failure;
2129 	}
2130 
2131 	if (new_nsid &&
2132 	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
2133 		goto nla_put_failure;
2134 	if (new_ifindex &&
2135 	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
2136 		goto nla_put_failure;
2137 
2138 	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
2139 	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
2140 		goto nla_put_failure;
2141 
2142 	rcu_read_lock();
2143 	if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
2144 		goto nla_put_failure_rcu;
2145 	qdisc = rcu_dereference(dev->qdisc);
2146 	if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
2147 		goto nla_put_failure_rcu;
2148 	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
2149 		goto nla_put_failure_rcu;
2150 	if (rtnl_fill_link_ifmap(skb, dev))
2151 		goto nla_put_failure_rcu;
2152 	if (rtnl_fill_prop_list(skb, dev))
2153 		goto nla_put_failure_rcu;
2154 	rcu_read_unlock();
2155 
2156 	if (dev->dev.parent &&
2157 	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
2158 			   dev_name(dev->dev.parent)))
2159 		goto nla_put_failure;
2160 
2161 	if (dev->dev.parent && dev->dev.parent->bus &&
2162 	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
2163 			   dev->dev.parent->bus->name))
2164 		goto nla_put_failure;
2165 
2166 	if (rtnl_fill_devlink_port(skb, dev))
2167 		goto nla_put_failure;
2168 
2169 	if (rtnl_fill_dpll_pin(skb, dev))
2170 		goto nla_put_failure;
2171 
2172 	nlmsg_end(skb, nlh);
2173 	return 0;
2174 
2175 nla_put_failure_rcu:
2176 	rcu_read_unlock();
2177 nla_put_failure:
2178 	nlmsg_cancel(skb, nlh);
2179 	return -EMSGSIZE;
2180 }
2181 
2182 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
2183 	[IFLA_UNSPEC]		= { .strict_start_type = IFLA_DPLL_PIN },
2184 	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
2185 	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
2186 	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
2187 	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
2188 	[IFLA_MTU]		= { .type = NLA_U32 },
2189 	[IFLA_LINK]		= { .type = NLA_U32 },
2190 	[IFLA_MASTER]		= { .type = NLA_U32 },
2191 	[IFLA_CARRIER]		= { .type = NLA_U8 },
2192 	[IFLA_TXQLEN]		= { .type = NLA_U32 },
2193 	[IFLA_WEIGHT]		= { .type = NLA_U32 },
2194 	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
2195 	[IFLA_LINKMODE]		= { .type = NLA_U8 },
2196 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
2197 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
2198 	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
2199 	/* IFLA_IFALIAS is a string, but the policy is set to NLA_BINARY to
2200 	 * allow a 0-length string (needed to remove an alias).
2201 	 */
2202 	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
2203 	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
2204 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
2205 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
2206 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
2207 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
2208 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
2209 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
2210 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
2211 	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
2212 	[IFLA_GSO_MAX_SIZE]	= NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
2213 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2214 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
2215 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2216 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
2217 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
2218 	[IFLA_XDP]		= { .type = NLA_NESTED },
2219 	[IFLA_EVENT]		= { .type = NLA_U32 },
2220 	[IFLA_GROUP]		= { .type = NLA_U32 },
2221 	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
2222 	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
2223 	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
2224 	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
2225 	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
2226 	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
2227 	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
2228 				    .len = ALTIFNAMSIZ - 1 },
2229 	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
2230 	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
2231 	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
2232 	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
2233 	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
2234 	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
2235 	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
2236 	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
2237 	[IFLA_GSO_IPV4_MAX_SIZE]	= NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
2238 	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
2239 	[IFLA_NETNS_IMMUTABLE]	= { .type = NLA_REJECT },
2240 };
2241 
2242 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2243 	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
2244 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
2245 	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
2246 	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
2247 };
2248 
2249 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
2250 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
2251 	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
2252 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
2253 	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
2254 	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
2255 	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
2256 	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
2257 	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
2258 	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
2259 	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
2260 	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
2261 	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
2262 	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
2263 };
2264 
2265 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2266 	[IFLA_PORT_VF]		= { .type = NLA_U32 },
2267 	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
2268 				    .len = PORT_PROFILE_MAX },
2269 	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2270 				      .len = PORT_UUID_MAX },
2271 	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
2272 				    .len = PORT_UUID_MAX },
2273 	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
2274 	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
2275 
2276 	/* Unused, but we need to keep it here since user space could
2277 	 * fill it. It's also broken with regard to NLA_BINARY use in
2278 	 * combination with structs.
2279 	 */
2280 	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
2281 				    .len = sizeof(struct ifla_port_vsi) },
2282 };
2283 
2284 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2285 	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2286 	[IFLA_XDP_FD]		= { .type = NLA_S32 },
2287 	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
2288 	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
2289 	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
2290 	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
2291 };
2292 
2293 static struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla,
2294 						  int *ops_srcu_index)
2295 {
2296 	struct nlattr *linfo[IFLA_INFO_MAX + 1];
2297 	struct rtnl_link_ops *ops = NULL;
2298 
2299 	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2300 		return NULL;
2301 
2302 	if (linfo[IFLA_INFO_KIND]) {
2303 		char kind[MODULE_NAME_LEN];
2304 
2305 		nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2306 		ops = rtnl_link_ops_get(kind, ops_srcu_index);
2307 	}
2308 
2309 	return ops;
2310 }
2311 
2312 static bool link_master_filtered(struct net_device *dev, int master_idx)
2313 {
2314 	struct net_device *master;
2315 
2316 	if (!master_idx)
2317 		return false;
2318 
2319 	master = netdev_master_upper_dev_get(dev);
2320 
2321 	/* 0 already denotes that IFLA_MASTER wasn't passed, so another
2322 	 * invalid ifindex value is needed to denote "no master".
2323 	 */
2324 	if (master_idx == -1)
2325 		return !!master;
2326 
2327 	if (!master || master->ifindex != master_idx)
2328 		return true;
2329 
2330 	return false;
2331 }
2332 
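/* Illustrative examples of the semantics above, for a hypothetical dev
 * ("filtered" means the device is skipped by the dump):
 *
 *	link_master_filtered(dev, 0)	false: no IFLA_MASTER filter given
 *	link_master_filtered(dev, -1)	true iff dev has a master, i.e.
 *					-1 selects masterless devices only
 *	link_master_filtered(dev, 42)	true unless dev's master has
 *					ifindex 42
 */
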
2333 static bool link_kind_filtered(const struct net_device *dev,
2334 			       const struct rtnl_link_ops *kind_ops)
2335 {
2336 	if (kind_ops && dev->rtnl_link_ops != kind_ops)
2337 		return true;
2338 
2339 	return false;
2340 }
2341 
2342 static bool link_dump_filtered(struct net_device *dev,
2343 			       int master_idx,
2344 			       const struct rtnl_link_ops *kind_ops)
2345 {
2346 	if (link_master_filtered(dev, master_idx) ||
2347 	    link_kind_filtered(dev, kind_ops))
2348 		return true;
2349 
2350 	return false;
2351 }
2352 
2353 /**
2354  * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2355  * @sk: netlink socket
2356  * @netnsid: network namespace identifier
2357  *
2358  * Returns the network namespace identified by netnsid on success or an error
2359  * pointer on failure.
2360  */
2361 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2362 {
2363 	struct net *net;
2364 
2365 	net = get_net_ns_by_id(sock_net(sk), netnsid);
2366 	if (!net)
2367 		return ERR_PTR(-EINVAL);
2368 
2369 	/* For now, the caller is required to have CAP_NET_ADMIN in
2370 	 * the user namespace owning the target net ns.
2371 	 */
2372 	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2373 		put_net(net);
2374 		return ERR_PTR(-EACCES);
2375 	}
2376 	return net;
2377 }
2378 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2379 
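/* Illustrative caller sketch (hypothetical, mirroring what
 * rtnl_dump_ifinfo() below does): resolving an IFLA_TARGET_NETNSID
 * attribute to a namespace reference. The returned net holds a
 * reference that must be dropped with put_net().
 *
 *	int netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
 *	struct net *tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	...
 *	put_net(tgt_net);
 */
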
2380 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2381 				      bool strict_check, struct nlattr **tb,
2382 				      struct netlink_ext_ack *extack)
2383 {
2384 	int hdrlen;
2385 
2386 	if (strict_check) {
2387 		struct ifinfomsg *ifm;
2388 
2389 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2390 			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2391 			return -EINVAL;
2392 		}
2393 
2394 		ifm = nlmsg_data(nlh);
2395 		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2396 		    ifm->ifi_change) {
2397 			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2398 			return -EINVAL;
2399 		}
2400 		if (ifm->ifi_index) {
2401 			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2402 			return -EINVAL;
2403 		}
2404 
2405 		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2406 						     IFLA_MAX, ifla_policy,
2407 						     extack);
2408 	}
2409 
2410 	/* A hack to preserve kernel<->userspace interface.
2411 	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2412 	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2413 	 * what iproute2 < v3.9.0 used.
2414 	 * We can detect the old iproute2: even including the IFLA_EXT_MASK
2415 	 * attribute, its netlink message is shorter than struct ifinfomsg.
2416 	 */
2417 	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2418 		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2419 
2420 	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2421 				      extack);
2422 }
2423 
2424 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2425 {
2426 	struct netlink_ext_ack *extack = cb->extack;
2427 	struct rtnl_link_ops *kind_ops = NULL;
2428 	const struct nlmsghdr *nlh = cb->nlh;
2429 	struct net *net = sock_net(skb->sk);
2430 	unsigned int flags = NLM_F_MULTI;
2431 	struct nlattr *tb[IFLA_MAX+1];
2432 	struct {
2433 		unsigned long ifindex;
2434 	} *ctx = (void *)cb->ctx;
2435 	struct net *tgt_net = net;
2436 	u32 ext_filter_mask = 0;
2437 	struct net_device *dev;
2438 	int ops_srcu_index;
2439 	int master_idx = 0;
2440 	int netnsid = -1;
2441 	int err, i;
2442 
2443 	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2444 	if (err < 0) {
2445 		if (cb->strict_check)
2446 			return err;
2447 
2448 		goto walk_entries;
2449 	}
2450 
2451 	for (i = 0; i <= IFLA_MAX; ++i) {
2452 		if (!tb[i])
2453 			continue;
2454 
2455 		/* new attributes should only be added with strict checking */
2456 		switch (i) {
2457 		case IFLA_TARGET_NETNSID:
2458 			netnsid = nla_get_s32(tb[i]);
2459 			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2460 			if (IS_ERR(tgt_net)) {
2461 				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2462 				err = PTR_ERR(tgt_net);
2463 				netnsid = -1;
2464 				goto out;
2465 			}
2466 			break;
2467 		case IFLA_EXT_MASK:
2468 			ext_filter_mask = nla_get_u32(tb[i]);
2469 			break;
2470 		case IFLA_MASTER:
2471 			master_idx = nla_get_u32(tb[i]);
2472 			break;
2473 		case IFLA_LINKINFO:
2474 			kind_ops = linkinfo_to_kind_ops(tb[i], &ops_srcu_index);
2475 			break;
2476 		default:
2477 			if (cb->strict_check) {
2478 				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2479 				err = -EINVAL;
2480 				goto out;
2481 			}
2482 		}
2483 	}
2484 
2485 	if (master_idx || kind_ops)
2486 		flags |= NLM_F_DUMP_FILTERED;
2487 
2488 walk_entries:
2489 	err = 0;
2490 	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2491 		if (link_dump_filtered(dev, master_idx, kind_ops))
2492 			continue;
2493 		err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2494 				       NETLINK_CB(cb->skb).portid,
2495 				       nlh->nlmsg_seq, 0, flags,
2496 				       ext_filter_mask, 0, NULL, 0,
2497 				       netnsid, GFP_KERNEL);
2498 		if (err < 0)
2499 			break;
2500 	}
2501 
2503 	cb->seq = tgt_net->dev_base_seq;
2504 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2505 
2506 out:
2507 
2508 	if (kind_ops)
2509 		rtnl_link_ops_put(kind_ops, ops_srcu_index);
2510 	if (netnsid >= 0)
2511 		put_net(tgt_net);
2512 
2513 	return err;
2514 }
2515 
2516 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2517 			     struct netlink_ext_ack *exterr)
2518 {
2519 	const struct ifinfomsg *ifmp;
2520 	const struct nlattr *attrs;
2521 	size_t len;
2522 
2523 	ifmp = nla_data(nla_peer);
2524 	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2525 	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2526 
2527 	if (ifmp->ifi_index < 0) {
2528 		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2529 				    "ifindex can't be negative");
2530 		return -EINVAL;
2531 	}
2532 
2533 	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2534 				    exterr);
2535 }
2536 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2537 
2538 static struct net *rtnl_link_get_net_ifla(struct nlattr *tb[])
2539 {
2540 	struct net *net = NULL;
2541 
2542 	/* Examine the link attributes and figure out which
2543 	 * network namespace we are talking about.
2544 	 */
2545 	if (tb[IFLA_NET_NS_PID])
2546 		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2547 	else if (tb[IFLA_NET_NS_FD])
2548 		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2549 
2550 	return net;
2551 }
2552 
2553 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2554 {
2555 	struct net *net = rtnl_link_get_net_ifla(tb);
2556 
2557 	if (!net)
2558 		net = get_net(src_net);
2559 
2560 	return net;
2561 }
2562 EXPORT_SYMBOL(rtnl_link_get_net);
2563 
2564 /* Figure out which network namespace we are talking about by
2565  * examining the link attributes in the following order:
2566  *
2567  * 1. IFLA_NET_NS_PID
2568  * 2. IFLA_NET_NS_FD
2569  * 3. IFLA_TARGET_NETNSID
2570  */
2571 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2572 					       struct nlattr *tb[])
2573 {
2574 	struct net *net;
2575 
2576 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2577 		return rtnl_link_get_net(src_net, tb);
2578 
2579 	if (!tb[IFLA_TARGET_NETNSID])
2580 		return get_net(src_net);
2581 
2582 	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2583 	if (!net)
2584 		return ERR_PTR(-EINVAL);
2585 
2586 	return net;
2587 }
2588 
2589 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2590 					     struct net *src_net,
2591 					     struct nlattr *tb[], int cap)
2592 {
2593 	struct net *net;
2594 
2595 	net = rtnl_link_get_net_by_nlattr(src_net, tb);
2596 	if (IS_ERR(net))
2597 		return net;
2598 
2599 	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2600 		put_net(net);
2601 		return ERR_PTR(-EPERM);
2602 	}
2603 
2604 	return net;
2605 }
2606 
2607 /* Verify that rtnetlink requests do not pass additional properties
2608  * potentially referring to different network namespaces.
2609  */
2610 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2611 				    struct netlink_ext_ack *extack,
2612 				    bool netns_id_only)
2613 {
2615 	if (netns_id_only) {
2616 		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2617 			return 0;
2618 
2619 		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2620 		return -EOPNOTSUPP;
2621 	}
2622 
2623 	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2624 		goto invalid_attr;
2625 
2626 	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2627 		goto invalid_attr;
2628 
2629 	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2630 		goto invalid_attr;
2631 
2632 	return 0;
2633 
2634 invalid_attr:
2635 	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2636 	return -EINVAL;
2637 }
2638 
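/* Illustrative outcomes of the check above:
 *
 *	{ IFLA_NET_NS_PID }			-> 0
 *	{ IFLA_TARGET_NETNSID }			-> 0
 *	{ IFLA_NET_NS_PID, IFLA_NET_NS_FD }	-> -EINVAL
 *	{ IFLA_NET_NS_FD }, netns_id_only	-> -EOPNOTSUPP
 */
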
2639 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2640 			     int max_tx_rate)
2641 {
2642 	const struct net_device_ops *ops = dev->netdev_ops;
2643 
2644 	if (!ops->ndo_set_vf_rate)
2645 		return -EOPNOTSUPP;
2646 	if (max_tx_rate && max_tx_rate < min_tx_rate)
2647 		return -EINVAL;
2648 
2649 	return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2650 }
2651 
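/* Illustrative (hypothetical values): rtnl_set_vf_rate(dev, vf, 100, 50)
 * fails with -EINVAL because the non-zero max_tx_rate (50 Mb/s) is below
 * min_tx_rate (100 Mb/s); max_tx_rate == 0 means "unlimited" and always
 * passes the check.
 */
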
2652 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2653 			    struct netlink_ext_ack *extack)
2654 {
2655 	if (tb[IFLA_ADDRESS] &&
2656 	    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2657 		return -EINVAL;
2658 
2659 	if (tb[IFLA_BROADCAST] &&
2660 	    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2661 		return -EINVAL;
2662 
2663 	if (tb[IFLA_GSO_MAX_SIZE] &&
2664 	    nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2665 		NL_SET_ERR_MSG(extack, "too big gso_max_size");
2666 		return -EINVAL;
2667 	}
2668 
2669 	if (tb[IFLA_GSO_MAX_SEGS] &&
2670 	    (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2671 	     nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2672 		NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2673 		return -EINVAL;
2674 	}
2675 
2676 	if (tb[IFLA_GRO_MAX_SIZE] &&
2677 	    nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2678 		NL_SET_ERR_MSG(extack, "too big gro_max_size");
2679 		return -EINVAL;
2680 	}
2681 
2682 	if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2683 	    nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2684 		NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2685 		return -EINVAL;
2686 	}
2687 
2688 	if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2689 	    nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2690 		NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2691 		return -EINVAL;
2692 	}
2693 
2694 	if (tb[IFLA_AF_SPEC]) {
2695 		struct nlattr *af;
2696 		int rem, err;
2697 
2698 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2699 			struct rtnl_af_ops *af_ops;
2700 			int af_ops_srcu_index;
2701 
2702 			af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
2703 			if (!af_ops)
2704 				return -EAFNOSUPPORT;
2705 
2706 			if (!af_ops->set_link_af)
2707 				err = -EOPNOTSUPP;
2708 			else if (af_ops->validate_link_af)
2709 				err = af_ops->validate_link_af(dev, af, extack);
2710 			else
2711 				err = 0;
2712 
2713 			rtnl_af_put(af_ops, af_ops_srcu_index);
2714 
2715 			if (err < 0)
2716 				return err;
2717 		}
2718 	}
2719 
2720 	return 0;
2721 }
2722 
2723 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2724 				  int guid_type)
2725 {
2726 	const struct net_device_ops *ops = dev->netdev_ops;
2727 
2728 	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2729 }
2730 
2731 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2732 {
2733 	if (dev->type != ARPHRD_INFINIBAND)
2734 		return -EOPNOTSUPP;
2735 
2736 	return handle_infiniband_guid(dev, ivt, guid_type);
2737 }
2738 
2739 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2740 {
2741 	const struct net_device_ops *ops = dev->netdev_ops;
2742 	int err = -EINVAL;
2743 
2744 	if (tb[IFLA_VF_MAC]) {
2745 		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2746 
2747 		if (ivm->vf >= INT_MAX)
2748 			return -EINVAL;
2749 		err = -EOPNOTSUPP;
2750 		if (ops->ndo_set_vf_mac)
2751 			err = ops->ndo_set_vf_mac(dev, ivm->vf,
2752 						  ivm->mac);
2753 		if (err < 0)
2754 			return err;
2755 	}
2756 
2757 	if (tb[IFLA_VF_VLAN]) {
2758 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2759 
2760 		if (ivv->vf >= INT_MAX)
2761 			return -EINVAL;
2762 		err = -EOPNOTSUPP;
2763 		if (ops->ndo_set_vf_vlan)
2764 			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2765 						   ivv->qos,
2766 						   htons(ETH_P_8021Q));
2767 		if (err < 0)
2768 			return err;
2769 	}
2770 
2771 	if (tb[IFLA_VF_VLAN_LIST]) {
2772 		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2773 		struct nlattr *attr;
2774 		int rem, len = 0;
2775 
2776 		err = -EOPNOTSUPP;
2777 		if (!ops->ndo_set_vf_vlan)
2778 			return err;
2779 
2780 		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2781 			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2782 			    nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
2783 				return -EINVAL;
2784 			}
2785 			if (len >= MAX_VLAN_LIST_LEN)
2786 				return -EOPNOTSUPP;
2787 			ivvl[len] = nla_data(attr);
2788 
2789 			len++;
2790 		}
2791 		if (len == 0)
2792 			return -EINVAL;
2793 
2794 		if (ivvl[0]->vf >= INT_MAX)
2795 			return -EINVAL;
2796 		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2797 					   ivvl[0]->qos, ivvl[0]->vlan_proto);
2798 		if (err < 0)
2799 			return err;
2800 	}
2801 
2802 	if (tb[IFLA_VF_TX_RATE]) {
2803 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2804 		struct ifla_vf_info ivf;
2805 
2806 		if (ivt->vf >= INT_MAX)
2807 			return -EINVAL;
2808 		err = -EOPNOTSUPP;
2809 		if (ops->ndo_get_vf_config)
2810 			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2811 		if (err < 0)
2812 			return err;
2813 
2814 		err = rtnl_set_vf_rate(dev, ivt->vf,
2815 				       ivf.min_tx_rate, ivt->rate);
2816 		if (err < 0)
2817 			return err;
2818 	}
2819 
2820 	if (tb[IFLA_VF_RATE]) {
2821 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2822 
2823 		if (ivt->vf >= INT_MAX)
2824 			return -EINVAL;
2825 
2826 		err = rtnl_set_vf_rate(dev, ivt->vf,
2827 				       ivt->min_tx_rate, ivt->max_tx_rate);
2828 		if (err < 0)
2829 			return err;
2830 	}
2831 
2832 	if (tb[IFLA_VF_SPOOFCHK]) {
2833 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2834 
2835 		if (ivs->vf >= INT_MAX)
2836 			return -EINVAL;
2837 		err = -EOPNOTSUPP;
2838 		if (ops->ndo_set_vf_spoofchk)
2839 			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2840 						       ivs->setting);
2841 		if (err < 0)
2842 			return err;
2843 	}
2844 
2845 	if (tb[IFLA_VF_LINK_STATE]) {
2846 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2847 
2848 		if (ivl->vf >= INT_MAX)
2849 			return -EINVAL;
2850 		err = -EOPNOTSUPP;
2851 		if (ops->ndo_set_vf_link_state)
2852 			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2853 							 ivl->link_state);
2854 		if (err < 0)
2855 			return err;
2856 	}
2857 
2858 	if (tb[IFLA_VF_RSS_QUERY_EN]) {
2859 		struct ifla_vf_rss_query_en *ivrssq_en;
2860 
2861 		err = -EOPNOTSUPP;
2862 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2863 		if (ivrssq_en->vf >= INT_MAX)
2864 			return -EINVAL;
2865 		if (ops->ndo_set_vf_rss_query_en)
2866 			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2867 							   ivrssq_en->setting);
2868 		if (err < 0)
2869 			return err;
2870 	}
2871 
2872 	if (tb[IFLA_VF_TRUST]) {
2873 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2874 
2875 		if (ivt->vf >= INT_MAX)
2876 			return -EINVAL;
2877 		err = -EOPNOTSUPP;
2878 		if (ops->ndo_set_vf_trust)
2879 			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2880 		if (err < 0)
2881 			return err;
2882 	}
2883 
2884 	if (tb[IFLA_VF_IB_NODE_GUID]) {
2885 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2886 
2887 		if (ivt->vf >= INT_MAX)
2888 			return -EINVAL;
2889 		if (!ops->ndo_set_vf_guid)
2890 			return -EOPNOTSUPP;
2891 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2892 	}
2893 
2894 	if (tb[IFLA_VF_IB_PORT_GUID]) {
2895 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2896 
2897 		if (ivt->vf >= INT_MAX)
2898 			return -EINVAL;
2899 		if (!ops->ndo_set_vf_guid)
2900 			return -EOPNOTSUPP;
2901 
2902 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2903 	}
2904 
2905 	return err;
2906 }
2907 
2908 static int do_set_master(struct net_device *dev, int ifindex,
2909 			 struct netlink_ext_ack *extack)
2910 {
2911 	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2912 	const struct net_device_ops *ops;
2913 	int err;
2914 
2915 	if (upper_dev) {
2916 		if (upper_dev->ifindex == ifindex)
2917 			return 0;
2918 		ops = upper_dev->netdev_ops;
2919 		if (ops->ndo_del_slave) {
2920 			err = ops->ndo_del_slave(upper_dev, dev);
2921 			if (err)
2922 				return err;
2923 		} else {
2924 			return -EOPNOTSUPP;
2925 		}
2926 	}
2927 
2928 	if (ifindex) {
2929 		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2930 		if (!upper_dev)
2931 			return -EINVAL;
2932 		ops = upper_dev->netdev_ops;
2933 		if (ops->ndo_add_slave) {
2934 			err = ops->ndo_add_slave(upper_dev, dev, extack);
2935 			if (err)
2936 				return err;
2937 		} else {
2938 			return -EOPNOTSUPP;
2939 		}
2940 	}
2941 	return 0;
2942 }
2943 
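/* Illustrative in-kernel usage sketch (hypothetical callers):
 *
 *	do_set_master(dev, 0, NULL);		release the current master,
 *						if any, and stop there
 *	do_set_master(dev, bond_ifindex, extack); release the old master,
 *						then enslave dev to the
 *						device with that ifindex
 *
 * Either upper device must implement ndo_del_slave()/ndo_add_slave(),
 * otherwise -EOPNOTSUPP is returned.
 */
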
2944 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2945 	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
2946 	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
2947 };
2948 
2949 static int do_set_proto_down(struct net_device *dev,
2950 			     struct nlattr *nl_proto_down,
2951 			     struct nlattr *nl_proto_down_reason,
2952 			     struct netlink_ext_ack *extack)
2953 {
2954 	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2955 	unsigned long mask = 0;
2956 	u32 value;
2957 	bool proto_down;
2958 	int err;
2959 
2960 	if (!dev->change_proto_down) {
2961 		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2962 		return -EOPNOTSUPP;
2963 	}
2964 
2965 	if (nl_proto_down_reason) {
2966 		err = nla_parse_nested_deprecated(pdreason,
2967 						  IFLA_PROTO_DOWN_REASON_MAX,
2968 						  nl_proto_down_reason,
2969 						  ifla_proto_down_reason_policy,
2970 						  NULL);
2971 		if (err < 0)
2972 			return err;
2973 
2974 		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2975 			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2976 			return -EINVAL;
2977 		}
2978 
2979 		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2980 
2981 		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2982 			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2983 
2984 		dev_change_proto_down_reason(dev, mask, value);
2985 	}
2986 
2987 	if (nl_proto_down) {
2988 		proto_down = nla_get_u8(nl_proto_down);
2989 
2990 		/* Don't turn off protodown if there are active reasons */
2991 		if (!proto_down && dev->proto_down_reason) {
2992 			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2993 			return -EBUSY;
2994 		}
2995 		err = dev_change_proto_down(dev, proto_down);
2997 		if (err)
2998 			return err;
2999 	}
3000 
3001 	return 0;
3002 }
3003 
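/* Worked example of the mask/value update above (hypothetical reason
 * bits), starting from dev->proto_down_reason == 0x6:
 *
 *	IFLA_PROTO_DOWN_REASON_MASK  = 0x3	bits to change
 *	IFLA_PROTO_DOWN_REASON_VALUE = 0x1	new state of those bits
 *
 * dev_change_proto_down_reason(dev, 0x3, 0x1) sets bit 0, clears bit 1
 * and leaves bit 2 alone, giving 0x5. IFLA_PROTO_DOWN itself can only
 * be cleared again once all reason bits are zero (-EBUSY otherwise).
 */
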
3004 #define DO_SETLINK_MODIFIED	0x01
3005 /* notify flag means notify + modified. */
3006 #define DO_SETLINK_NOTIFY	0x03
3007 static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
3008 		      struct net *tgt_net, struct ifinfomsg *ifm,
3009 		      struct netlink_ext_ack *extack,
3010 		      struct nlattr **tb, int status)
3011 {
3012 	const struct net_device_ops *ops = dev->netdev_ops;
3013 	char ifname[IFNAMSIZ];
3014 	int err;
3015 
3016 	err = validate_linkmsg(dev, tb, extack);
3017 	if (err < 0)
3018 		goto errout;
3019 
3020 	if (tb[IFLA_IFNAME])
3021 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3022 	else
3023 		ifname[0] = '\0';
3024 
3025 	if (!net_eq(tgt_net, dev_net(dev))) {
3026 		const char *pat = ifname[0] ? ifname : NULL;
3027 		int new_ifindex;
3028 
3029 		new_ifindex = nla_get_s32_default(tb[IFLA_NEW_IFINDEX], 0);
3030 
3031 		err = __dev_change_net_namespace(dev, tgt_net, pat, new_ifindex, extack);
3032 		if (err)
3033 			goto errout;
3034 
3035 		status |= DO_SETLINK_MODIFIED;
3036 	}
3037 
3038 	if (tb[IFLA_MAP]) {
3039 		struct rtnl_link_ifmap *u_map;
3040 		struct ifmap k_map;
3041 
3042 		if (!ops->ndo_set_config) {
3043 			err = -EOPNOTSUPP;
3044 			goto errout;
3045 		}
3046 
3047 		if (!netif_device_present(dev)) {
3048 			err = -ENODEV;
3049 			goto errout;
3050 		}
3051 
3052 		u_map = nla_data(tb[IFLA_MAP]);
3053 		k_map.mem_start = (unsigned long) u_map->mem_start;
3054 		k_map.mem_end = (unsigned long) u_map->mem_end;
3055 		k_map.base_addr = (unsigned short) u_map->base_addr;
3056 		k_map.irq = (unsigned char) u_map->irq;
3057 		k_map.dma = (unsigned char) u_map->dma;
3058 		k_map.port = (unsigned char) u_map->port;
3059 
3060 		err = ops->ndo_set_config(dev, &k_map);
3061 		if (err < 0)
3062 			goto errout;
3063 
3064 		status |= DO_SETLINK_NOTIFY;
3065 	}
3066 
3067 	if (tb[IFLA_ADDRESS]) {
3068 		struct sockaddr *sa;
3069 		int len;
3070 
3071 		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
3072 						  sizeof(*sa));
3073 		sa = kmalloc(len, GFP_KERNEL);
3074 		if (!sa) {
3075 			err = -ENOMEM;
3076 			goto errout;
3077 		}
3078 		sa->sa_family = dev->type;
3079 		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
3080 		       dev->addr_len);
3081 		err = dev_set_mac_address_user(dev, sa, extack);
3082 		kfree(sa);
3083 		if (err)
3084 			goto errout;
3085 		status |= DO_SETLINK_MODIFIED;
3086 	}
3087 
3088 	if (tb[IFLA_MTU]) {
3089 		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
3090 		if (err < 0)
3091 			goto errout;
3092 		status |= DO_SETLINK_MODIFIED;
3093 	}
3094 
3095 	if (tb[IFLA_GROUP]) {
3096 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3097 		status |= DO_SETLINK_NOTIFY;
3098 	}
3099 
3100 	/*
3101 	 * Selecting the interface by index while also providing a
3102 	 * name implies that a name change has been requested.
3103 	 */
3105 	if (ifm->ifi_index > 0 && ifname[0]) {
3106 		err = dev_change_name(dev, ifname);
3107 		if (err < 0)
3108 			goto errout;
3109 		status |= DO_SETLINK_MODIFIED;
3110 	}
3111 
3112 	if (tb[IFLA_IFALIAS]) {
3113 		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
3114 				    nla_len(tb[IFLA_IFALIAS]));
3115 		if (err < 0)
3116 			goto errout;
3117 		status |= DO_SETLINK_NOTIFY;
3118 	}
3119 
3120 	if (tb[IFLA_BROADCAST]) {
3121 		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
3122 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3123 	}
3124 
3125 	if (ifm->ifi_flags || ifm->ifi_change) {
3126 		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3127 				       extack);
3128 		if (err < 0)
3129 			goto errout;
3130 	}
3131 
3132 	if (tb[IFLA_MASTER]) {
3133 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3134 		if (err)
3135 			goto errout;
3136 		status |= DO_SETLINK_MODIFIED;
3137 	}
3138 
3139 	if (tb[IFLA_CARRIER]) {
3140 		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
3141 		if (err)
3142 			goto errout;
3143 		status |= DO_SETLINK_MODIFIED;
3144 	}
3145 
3146 	if (tb[IFLA_TXQLEN]) {
3147 		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
3148 
3149 		err = dev_change_tx_queue_len(dev, value);
3150 		if (err)
3151 			goto errout;
3152 		status |= DO_SETLINK_MODIFIED;
3153 	}
3154 
3155 	if (tb[IFLA_GSO_MAX_SIZE]) {
3156 		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
3157 
3158 		if (dev->gso_max_size ^ max_size) {
3159 			netif_set_gso_max_size(dev, max_size);
3160 			status |= DO_SETLINK_MODIFIED;
3161 		}
3162 	}
3163 
3164 	if (tb[IFLA_GSO_MAX_SEGS]) {
3165 		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
3166 
3167 		if (dev->gso_max_segs ^ max_segs) {
3168 			netif_set_gso_max_segs(dev, max_segs);
3169 			status |= DO_SETLINK_MODIFIED;
3170 		}
3171 	}
3172 
3173 	if (tb[IFLA_GRO_MAX_SIZE]) {
3174 		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
3175 
3176 		if (dev->gro_max_size ^ gro_max_size) {
3177 			netif_set_gro_max_size(dev, gro_max_size);
3178 			status |= DO_SETLINK_MODIFIED;
3179 		}
3180 	}
3181 
3182 	if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
3183 		u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
3184 
3185 		if (dev->gso_ipv4_max_size ^ max_size) {
3186 			netif_set_gso_ipv4_max_size(dev, max_size);
3187 			status |= DO_SETLINK_MODIFIED;
3188 		}
3189 	}
3190 
3191 	if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
3192 		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
3193 
3194 		if (dev->gro_ipv4_max_size ^ gro_max_size) {
3195 			netif_set_gro_ipv4_max_size(dev, gro_max_size);
3196 			status |= DO_SETLINK_MODIFIED;
3197 		}
3198 	}
3199 
3200 	if (tb[IFLA_OPERSTATE])
3201 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3202 
3203 	if (tb[IFLA_LINKMODE]) {
3204 		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
3205 
3206 		if (dev->link_mode ^ value)
3207 			status |= DO_SETLINK_NOTIFY;
3208 		WRITE_ONCE(dev->link_mode, value);
3209 	}
3210 
3211 	if (tb[IFLA_VFINFO_LIST]) {
3212 		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
3213 		struct nlattr *attr;
3214 		int rem;
3215 
3216 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
3217 			if (nla_type(attr) != IFLA_VF_INFO ||
3218 			    nla_len(attr) < NLA_HDRLEN) {
3219 				err = -EINVAL;
3220 				goto errout;
3221 			}
3222 			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
3223 							  attr,
3224 							  ifla_vf_policy,
3225 							  NULL);
3226 			if (err < 0)
3227 				goto errout;
3228 			err = do_setvfinfo(dev, vfinfo);
3229 			if (err < 0)
3230 				goto errout;
3231 			status |= DO_SETLINK_NOTIFY;
3232 		}
3233 	}
3234 	err = 0;
3235 
3236 	if (tb[IFLA_VF_PORTS]) {
3237 		struct nlattr *port[IFLA_PORT_MAX+1];
3238 		struct nlattr *attr;
3239 		int vf;
3240 		int rem;
3241 
3242 		err = -EOPNOTSUPP;
3243 		if (!ops->ndo_set_vf_port)
3244 			goto errout;
3245 
3246 		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
3247 			if (nla_type(attr) != IFLA_VF_PORT ||
3248 			    nla_len(attr) < NLA_HDRLEN) {
3249 				err = -EINVAL;
3250 				goto errout;
3251 			}
3252 			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3253 							  attr,
3254 							  ifla_port_policy,
3255 							  NULL);
3256 			if (err < 0)
3257 				goto errout;
3258 			if (!port[IFLA_PORT_VF]) {
3259 				err = -EOPNOTSUPP;
3260 				goto errout;
3261 			}
3262 			vf = nla_get_u32(port[IFLA_PORT_VF]);
3263 			err = ops->ndo_set_vf_port(dev, vf, port);
3264 			if (err < 0)
3265 				goto errout;
3266 			status |= DO_SETLINK_NOTIFY;
3267 		}
3268 	}
3269 	err = 0;
3270 
3271 	if (tb[IFLA_PORT_SELF]) {
3272 		struct nlattr *port[IFLA_PORT_MAX+1];
3273 
3274 		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3275 						  tb[IFLA_PORT_SELF],
3276 						  ifla_port_policy, NULL);
3277 		if (err < 0)
3278 			goto errout;
3279 
3280 		err = -EOPNOTSUPP;
3281 		if (ops->ndo_set_vf_port)
3282 			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3283 		if (err < 0)
3284 			goto errout;
3285 		status |= DO_SETLINK_NOTIFY;
3286 	}
3287 
3288 	if (tb[IFLA_AF_SPEC]) {
3289 		struct nlattr *af;
3290 		int rem;
3291 
3292 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3293 			struct rtnl_af_ops *af_ops;
3294 			int af_ops_srcu_index;
3295 
3296 			af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
3297 			if (!af_ops) {
3298 				err = -EAFNOSUPPORT;
3299 				goto errout;
3300 			}
3301 
3302 			err = af_ops->set_link_af(dev, af, extack);
3303 			rtnl_af_put(af_ops, af_ops_srcu_index);
3304 
3305 			if (err < 0)
3306 				goto errout;
3307 
3308 			status |= DO_SETLINK_NOTIFY;
3309 		}
3310 	}
3311 	err = 0;
3312 
3313 	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3314 		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3315 					tb[IFLA_PROTO_DOWN_REASON], extack);
3316 		if (err)
3317 			goto errout;
3318 		status |= DO_SETLINK_NOTIFY;
3319 	}
3320 
3321 	if (tb[IFLA_XDP]) {
3322 		struct nlattr *xdp[IFLA_XDP_MAX + 1];
3323 		u32 xdp_flags = 0;
3324 
3325 		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3326 						  tb[IFLA_XDP],
3327 						  ifla_xdp_policy, NULL);
3328 		if (err < 0)
3329 			goto errout;
3330 
3331 		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3332 			err = -EINVAL;
3333 			goto errout;
3334 		}
3335 
3336 		if (xdp[IFLA_XDP_FLAGS]) {
3337 			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3338 			if (xdp_flags & ~XDP_FLAGS_MASK) {
3339 				err = -EINVAL;
3340 				goto errout;
3341 			}
3342 			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3343 				err = -EINVAL;
3344 				goto errout;
3345 			}
3346 		}
3347 
3348 		if (xdp[IFLA_XDP_FD]) {
3349 			int expected_fd = -1;
3350 
3351 			if (xdp_flags & XDP_FLAGS_REPLACE) {
3352 				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3353 					err = -EINVAL;
3354 					goto errout;
3355 				}
3356 				expected_fd =
3357 					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3358 			}
3359 
3360 			err = dev_change_xdp_fd(dev, extack,
3361 						nla_get_s32(xdp[IFLA_XDP_FD]),
3362 						expected_fd,
3363 						xdp_flags);
3364 			if (err)
3365 				goto errout;
3366 			status |= DO_SETLINK_NOTIFY;
3367 		}
3368 	}
3369 
3370 errout:
3371 	if (status & DO_SETLINK_MODIFIED) {
3372 		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3373 			netdev_state_change(dev);
3374 
3375 		if (err < 0)
3376 			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3377 					     dev->name);
3378 	}
3379 
3380 	return err;
3381 }
3382 
3383 static struct net_device *rtnl_dev_get(struct net *net,
3384 				       struct nlattr *tb[])
3385 {
3386 	char ifname[ALTIFNAMSIZ];
3387 
3388 	if (tb[IFLA_IFNAME])
3389 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3390 	else if (tb[IFLA_ALT_IFNAME])
3391 		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3392 	else
3393 		return NULL;
3394 
3395 	return __dev_get_by_name(net, ifname);
3396 }
3397 
3398 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3399 			struct netlink_ext_ack *extack)
3400 {
3401 	struct ifinfomsg *ifm = nlmsg_data(nlh);
3402 	struct net *net = sock_net(skb->sk);
3403 	struct nlattr *tb[IFLA_MAX+1];
3404 	struct net_device *dev = NULL;
3405 	struct rtnl_nets rtnl_nets;
3406 	struct net *tgt_net;
3407 	int err;
3408 
3409 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3410 				     ifla_policy, extack);
3411 	if (err < 0)
3412 		goto errout;
3413 
3414 	err = rtnl_ensure_unique_netns(tb, extack, false);
3415 	if (err < 0)
3416 		goto errout;
3417 
3418 	tgt_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3419 	if (IS_ERR(tgt_net)) {
3420 		err = PTR_ERR(tgt_net);
3421 		goto errout;
3422 	}
3423 
3424 	rtnl_nets_init(&rtnl_nets);
3425 	rtnl_nets_add(&rtnl_nets, get_net(net));
3426 	rtnl_nets_add(&rtnl_nets, tgt_net);
3427 
3428 	rtnl_nets_lock(&rtnl_nets);
3429 
3430 	if (ifm->ifi_index > 0)
3431 		dev = __dev_get_by_index(net, ifm->ifi_index);
3432 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3433 		dev = rtnl_dev_get(net, tb);
3434 	else
3435 		err = -EINVAL;
3436 
3437 	if (dev)
3438 		err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
3439 	else if (!err)
3440 		err = -ENODEV;
3441 
3442 	rtnl_nets_unlock(&rtnl_nets);
3443 	rtnl_nets_destroy(&rtnl_nets);
3444 errout:
3445 	return err;
3446 }
3447 
3448 static int rtnl_group_dellink(const struct net *net, int group)
3449 {
3450 	struct net_device *dev, *aux;
3451 	LIST_HEAD(list_kill);
3452 	bool found = false;
3453 
3454 	if (!group)
3455 		return -EPERM;
3456 
3457 	for_each_netdev(net, dev) {
3458 		if (dev->group == group) {
3459 			const struct rtnl_link_ops *ops;
3460 
3461 			found = true;
3462 			ops = dev->rtnl_link_ops;
3463 			if (!ops || !ops->dellink)
3464 				return -EOPNOTSUPP;
3465 		}
3466 	}
3467 
3468 	if (!found)
3469 		return -ENODEV;
3470 
3471 	for_each_netdev_safe(net, dev, aux) {
3472 		if (dev->group == group) {
3473 			const struct rtnl_link_ops *ops;
3474 
3475 			ops = dev->rtnl_link_ops;
3476 			ops->dellink(dev, &list_kill);
3477 		}
3478 	}
3479 	unregister_netdevice_many(&list_kill);
3480 
3481 	return 0;
3482 }
3483 
3484 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3485 {
3486 	const struct rtnl_link_ops *ops;
3487 	LIST_HEAD(list_kill);
3488 
3489 	ops = dev->rtnl_link_ops;
3490 	if (!ops || !ops->dellink)
3491 		return -EOPNOTSUPP;
3492 
3493 	ops->dellink(dev, &list_kill);
3494 	unregister_netdevice_many_notify(&list_kill, portid, nlh);
3495 
3496 	return 0;
3497 }
3498 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3499 
3500 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3501 			struct netlink_ext_ack *extack)
3502 {
3503 	struct ifinfomsg *ifm = nlmsg_data(nlh);
3504 	struct net *net = sock_net(skb->sk);
3505 	u32 portid = NETLINK_CB(skb).portid;
3506 	struct nlattr *tb[IFLA_MAX+1];
3507 	struct net_device *dev = NULL;
3508 	struct net *tgt_net = net;
3509 	int netnsid = -1;
3510 	int err;
3511 
3512 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3513 				     ifla_policy, extack);
3514 	if (err < 0)
3515 		return err;
3516 
3517 	err = rtnl_ensure_unique_netns(tb, extack, true);
3518 	if (err < 0)
3519 		return err;
3520 
3521 	if (tb[IFLA_TARGET_NETNSID]) {
3522 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3523 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3524 		if (IS_ERR(tgt_net))
3525 			return PTR_ERR(tgt_net);
3526 	}
3527 
3528 	rtnl_net_lock(tgt_net);
3529 
3530 	if (ifm->ifi_index > 0)
3531 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3532 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3533 		dev = rtnl_dev_get(tgt_net, tb);
3534 
3535 	if (dev)
3536 		err = rtnl_delete_link(dev, portid, nlh);
3537 	else if (ifm->ifi_index > 0 || tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
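/* Illustrative caller sketch, in the style of drivers such as veth
 * (names from that driver; error handling trimmed): parsing the peer
 * ifinfomsg and its attributes nested inside IFLA_INFO_DATA:
 *
 *	struct nlattr *peer_tb[IFLA_MAX + 1];
 *
 *	if (data && data[VETH_INFO_PEER]) {
 *		err = rtnl_nla_parse_ifinfomsg(peer_tb, data[VETH_INFO_PEER],
 *					       extack);
 *		if (err < 0)
 *			return err;
 *	}
 */
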
3538 		err = -ENODEV;
3539 	else if (tb[IFLA_GROUP])
3540 		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3541 	else
3542 		err = -EINVAL;
3543 
3544 	rtnl_net_unlock(tgt_net);
3545 
3546 	if (netnsid >= 0)
3547 		put_net(tgt_net);
3548 
3549 	return err;
3550 }
3551 
3552 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3553 			u32 portid, const struct nlmsghdr *nlh)
3554 {
3555 	unsigned int old_flags;
3556 	int err;
3557 
3558 	old_flags = dev->flags;
3559 	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3560 		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3561 					 NULL);
3562 		if (err < 0)
3563 			return err;
3564 	}
3565 
3566 	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3567 		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3568 	} else {
3569 		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3570 		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3571 	}
3572 	return 0;
3573 }
3574 EXPORT_SYMBOL(rtnl_configure_link);
3575 
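/* Illustrative sketch (hypothetical caller): the usual flow for code
 * creating a link on behalf of userspace is rtnl_create_link() (below),
 * register_netdevice() and then rtnl_configure_link(), much like
 * rtnl_newlink_create() further down:
 *
 *	dev = rtnl_create_link(net, "example%d", NET_NAME_ENUM, ops, tb,
 *			       extack);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	err = register_netdevice(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	err = rtnl_configure_link(dev, NULL, 0, NULL);
 */
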
3576 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3577 				    unsigned char name_assign_type,
3578 				    const struct rtnl_link_ops *ops,
3579 				    struct nlattr *tb[],
3580 				    struct netlink_ext_ack *extack)
3581 {
3582 	struct net_device *dev;
3583 	unsigned int num_tx_queues = 1;
3584 	unsigned int num_rx_queues = 1;
3585 	int err;
3586 
3587 	if (tb[IFLA_NUM_TX_QUEUES])
3588 		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3589 	else if (ops->get_num_tx_queues)
3590 		num_tx_queues = ops->get_num_tx_queues();
3591 
3592 	if (tb[IFLA_NUM_RX_QUEUES])
3593 		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3594 	else if (ops->get_num_rx_queues)
3595 		num_rx_queues = ops->get_num_rx_queues();
3596 
3597 	if (num_tx_queues < 1 || num_tx_queues > 4096) {
3598 		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3599 		return ERR_PTR(-EINVAL);
3600 	}
3601 
3602 	if (num_rx_queues < 1 || num_rx_queues > 4096) {
3603 		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3604 		return ERR_PTR(-EINVAL);
3605 	}
3606 
3607 	if (ops->alloc) {
3608 		dev = ops->alloc(tb, ifname, name_assign_type,
3609 				 num_tx_queues, num_rx_queues);
3610 		if (IS_ERR(dev))
3611 			return dev;
3612 	} else {
3613 		dev = alloc_netdev_mqs(ops->priv_size, ifname,
3614 				       name_assign_type, ops->setup,
3615 				       num_tx_queues, num_rx_queues);
3616 	}
3617 
3618 	if (!dev)
3619 		return ERR_PTR(-ENOMEM);
3620 
3621 	err = validate_linkmsg(dev, tb, extack);
3622 	if (err < 0) {
3623 		free_netdev(dev);
3624 		return ERR_PTR(err);
3625 	}
3626 
3627 	dev_net_set(dev, net);
3628 	dev->rtnl_link_ops = ops;
3629 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3630 
3631 	if (tb[IFLA_MTU]) {
3632 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3633 
3634 		err = dev_validate_mtu(dev, mtu, extack);
3635 		if (err) {
3636 			free_netdev(dev);
3637 			return ERR_PTR(err);
3638 		}
3639 		dev->mtu = mtu;
3640 	}
3641 	if (tb[IFLA_ADDRESS]) {
3642 		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3643 			       nla_len(tb[IFLA_ADDRESS]));
3644 		dev->addr_assign_type = NET_ADDR_SET;
3645 	}
3646 	if (tb[IFLA_BROADCAST])
3647 		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3648 				nla_len(tb[IFLA_BROADCAST]));
3649 	if (tb[IFLA_TXQLEN])
3650 		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3651 	if (tb[IFLA_OPERSTATE])
3652 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3653 	if (tb[IFLA_LINKMODE])
3654 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3655 	if (tb[IFLA_GROUP])
3656 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3657 	if (tb[IFLA_GSO_MAX_SIZE])
3658 		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3659 	if (tb[IFLA_GSO_MAX_SEGS])
3660 		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3661 	if (tb[IFLA_GRO_MAX_SIZE])
3662 		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3663 	if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3664 		netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3665 	if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3666 		netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3667 
3668 	return dev;
3669 }
3670 EXPORT_SYMBOL(rtnl_create_link);
3671 
3672 struct rtnl_newlink_tbs {
3673 	struct nlattr *tb[IFLA_MAX + 1];
3674 	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3675 	struct nlattr *attr[RTNL_MAX_TYPE + 1];
3676 	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3677 };
3678 
3679 static int rtnl_changelink(const struct sk_buff *skb, struct nlmsghdr *nlh,
3680 			   const struct rtnl_link_ops *ops,
3681 			   struct net_device *dev, struct net *tgt_net,
3682 			   struct rtnl_newlink_tbs *tbs,
3683 			   struct nlattr **data,
3684 			   struct netlink_ext_ack *extack)
3685 {
3686 	struct nlattr ** const linkinfo = tbs->linkinfo;
3687 	struct nlattr ** const tb = tbs->tb;
3688 	int status = 0;
3689 	int err;
3690 
3691 	if (nlh->nlmsg_flags & NLM_F_EXCL)
3692 		return -EEXIST;
3693 
3694 	if (nlh->nlmsg_flags & NLM_F_REPLACE)
3695 		return -EOPNOTSUPP;
3696 
3697 	if (linkinfo[IFLA_INFO_DATA]) {
3698 		if (!ops || ops != dev->rtnl_link_ops || !ops->changelink)
3699 			return -EOPNOTSUPP;
3700 
3701 		err = ops->changelink(dev, tb, data, extack);
3702 		if (err < 0)
3703 			return err;
3704 
3705 		status |= DO_SETLINK_NOTIFY;
3706 	}
3707 
3708 	if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3709 		const struct rtnl_link_ops *m_ops = NULL;
3710 		struct nlattr **slave_data = NULL;
3711 		struct net_device *master_dev;
3712 
3713 		master_dev = netdev_master_upper_dev_get(dev);
3714 		if (master_dev)
3715 			m_ops = master_dev->rtnl_link_ops;
3716 
3717 		if (!m_ops || !m_ops->slave_changelink)
3718 			return -EOPNOTSUPP;
3719 
3720 		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3721 			return -EINVAL;
3722 
3723 		if (m_ops->slave_maxtype) {
3724 			err = nla_parse_nested_deprecated(tbs->slave_attr,
3725 							  m_ops->slave_maxtype,
3726 							  linkinfo[IFLA_INFO_SLAVE_DATA],
3727 							  m_ops->slave_policy, extack);
3728 			if (err < 0)
3729 				return err;
3730 
3731 			slave_data = tbs->slave_attr;
3732 		}
3733 
3734 		err = m_ops->slave_changelink(master_dev, dev, tb, slave_data, extack);
3735 		if (err < 0)
3736 			return err;
3737 
3738 		status |= DO_SETLINK_NOTIFY;
3739 	}
3740 
3741 	return do_setlink(skb, dev, tgt_net, nlmsg_data(nlh), extack, tb, status);
3742 }
3743 
3744 static int rtnl_group_changelink(const struct sk_buff *skb,
3745 				 struct net *net, struct net *tgt_net,
3746 				 int group, struct ifinfomsg *ifm,
3747 				 struct netlink_ext_ack *extack,
3748 				 struct nlattr **tb)
3749 {
3750 	struct net_device *dev, *aux;
3751 	int err;
3752 
3753 	for_each_netdev_safe(net, dev, aux) {
3754 		if (dev->group == group) {
3755 			err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
3756 			if (err < 0)
3757 				return err;
3758 		}
3759 	}
3760 
3761 	return 0;
3762 }
3763 
3764 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3765 			       const struct rtnl_link_ops *ops,
3766 			       struct net *tgt_net, struct net *link_net,
3767 			       struct net *peer_net,
3768 			       const struct nlmsghdr *nlh,
3769 			       struct nlattr **tb, struct nlattr **data,
3770 			       struct netlink_ext_ack *extack)
3771 {
3772 	unsigned char name_assign_type = NET_NAME_USER;
3773 	struct rtnl_newlink_params params = {
3774 		.src_net = sock_net(skb->sk),
3775 		.link_net = link_net,
3776 		.peer_net = peer_net,
3777 		.tb = tb,
3778 		.data = data,
3779 	};
3780 	u32 portid = NETLINK_CB(skb).portid;
3781 	struct net_device *dev;
3782 	char ifname[IFNAMSIZ];
3783 	int err;
3784 
3785 	if (!ops->alloc && !ops->setup)
3786 		return -EOPNOTSUPP;
3787 
3788 	if (tb[IFLA_IFNAME]) {
3789 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3790 	} else {
3791 		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3792 		name_assign_type = NET_NAME_ENUM;
3793 	}
3794 
3795 	dev = rtnl_create_link(tgt_net, ifname, name_assign_type, ops, tb,
3796 			       extack);
3797 	if (IS_ERR(dev)) {
3798 		err = PTR_ERR(dev);
3799 		goto out;
3800 	}
3801 
3802 	dev->ifindex = ifm->ifi_index;
3803 
3804 	if (ops->newlink)
3805 		err = ops->newlink(dev, &params, extack);
3806 	else
3807 		err = register_netdevice(dev);
3808 	if (err < 0) {
3809 		free_netdev(dev);
3810 		goto out;
3811 	}
3812 
3813 	err = rtnl_configure_link(dev, ifm, portid, nlh);
3814 	if (err < 0)
3815 		goto out_unregister;
3816 	if (tb[IFLA_MASTER]) {
3817 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3818 		if (err)
3819 			goto out_unregister;
3820 	}
3821 out:
3822 	return err;
3823 out_unregister:
3824 	if (ops->newlink) {
3825 		LIST_HEAD(list_kill);
3826 
3827 		ops->dellink(dev, &list_kill);
3828 		unregister_netdevice_many(&list_kill);
3829 	} else {
3830 		unregister_netdevice(dev);
3831 	}
3832 	goto out;
3833 }
3834 
3835 static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
3836 				     struct nlattr *tbp[],
3837 				     struct nlattr *data[],
3838 				     struct netlink_ext_ack *extack)
3839 {
3840 	struct nlattr *tb[IFLA_MAX + 1];
3841 	int err;
3842 
3843 	if (!data || !data[ops->peer_type])
3844 		return rtnl_link_get_net_ifla(tbp);
3845 
3846 	err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
3847 	if (err < 0)
3848 		return ERR_PTR(err);
3849 
3850 	if (ops->validate) {
3851 		err = ops->validate(tb, NULL, extack);
3852 		if (err < 0)
3853 			return ERR_PTR(err);
3854 	}
3855 
3856 	return rtnl_link_get_net_ifla(tb);
3857 }
3858 
3859 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3860 			  const struct rtnl_link_ops *ops,
3861 			  struct net *tgt_net, struct net *link_net,
3862 			  struct net *peer_net,
3863 			  struct rtnl_newlink_tbs *tbs,
3864 			  struct nlattr **data,
3865 			  struct netlink_ext_ack *extack)
3866 {
3867 	struct nlattr ** const tb = tbs->tb;
3868 	struct net *net = sock_net(skb->sk);
3869 	struct net *device_net;
3870 	struct net_device *dev;
3871 	struct ifinfomsg *ifm;
3872 	bool link_specified;
3873 
3874 	/* When creating, look up the existing device in the target net namespace */
3875 	device_net = (nlh->nlmsg_flags & NLM_F_CREATE) &&
3876 		     (nlh->nlmsg_flags & NLM_F_EXCL) ?
3877 		     tgt_net : net;
3878 
3879 	ifm = nlmsg_data(nlh);
3880 	if (ifm->ifi_index > 0) {
3881 		link_specified = true;
3882 		dev = __dev_get_by_index(device_net, ifm->ifi_index);
3883 	} else if (ifm->ifi_index < 0) {
3884 		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3885 		return -EINVAL;
3886 	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3887 		link_specified = true;
3888 		dev = rtnl_dev_get(device_net, tb);
3889 	} else {
3890 		link_specified = false;
3891 		dev = NULL;
3892 	}
3893 
3894 	if (dev)
3895 		return rtnl_changelink(skb, nlh, ops, dev, tgt_net, tbs, data, extack);
3896 
3897 	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3898 		/* No dev found and NLM_F_CREATE not set: the requested dev does
3899 		 * not exist, or the request is for a group.
3900 		 */
3901 		if (link_specified || !tb[IFLA_GROUP])
3902 			return -ENODEV;
3903 
3904 		return rtnl_group_changelink(skb, net, tgt_net,
3905 					     nla_get_u32(tb[IFLA_GROUP]),
3906 					     ifm, extack, tb);
3907 	}
3908 
3909 	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3910 		return -EOPNOTSUPP;
3911 
3912 	if (!ops) {
3913 		NL_SET_ERR_MSG(extack, "Unknown device type");
3914 		return -EOPNOTSUPP;
3915 	}
3916 
3917 	return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh,
3918 				   tb, data, extack);
3919 }
3920 
3921 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3922 			struct netlink_ext_ack *extack)
3923 {
3924 	struct net *tgt_net, *link_net = NULL, *peer_net = NULL;
3925 	struct nlattr **tb, **linkinfo, **data = NULL;
3926 	struct rtnl_link_ops *ops = NULL;
3927 	struct rtnl_newlink_tbs *tbs;
3928 	struct rtnl_nets rtnl_nets;
3929 	int ops_srcu_index;
3930 	int ret;
3931 
3932 	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3933 	if (!tbs)
3934 		return -ENOMEM;
3935 
3936 	tb = tbs->tb;
3937 	ret = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
3938 				     IFLA_MAX, ifla_policy, extack);
3939 	if (ret < 0)
3940 		goto free;
3941 
3942 	ret = rtnl_ensure_unique_netns(tb, extack, false);
3943 	if (ret < 0)
3944 		goto free;
3945 
3946 	linkinfo = tbs->linkinfo;
3947 	if (tb[IFLA_LINKINFO]) {
3948 		ret = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3949 						  tb[IFLA_LINKINFO],
3950 						  ifla_info_policy, NULL);
3951 		if (ret < 0)
3952 			goto free;
3953 	} else {
3954 		memset(linkinfo, 0, sizeof(tbs->linkinfo));
3955 	}
3956 
3957 	if (linkinfo[IFLA_INFO_KIND]) {
3958 		char kind[MODULE_NAME_LEN];
3959 
3960 		nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3961 		ops = rtnl_link_ops_get(kind, &ops_srcu_index);
3962 #ifdef CONFIG_MODULES
3963 		if (!ops) {
3964 			request_module("rtnl-link-%s", kind);
3965 			ops = rtnl_link_ops_get(kind, &ops_srcu_index);
3966 		}
3967 #endif
3968 	}
3969 
3970 	rtnl_nets_init(&rtnl_nets);
3971 
3972 	if (ops) {
3973 		if (ops->maxtype > RTNL_MAX_TYPE) {
3974 			ret = -EINVAL;
3975 			goto put_ops;
3976 		}
3977 
3978 		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3979 			ret = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3980 							  linkinfo[IFLA_INFO_DATA],
3981 							  ops->policy, extack);
3982 			if (ret < 0)
3983 				goto put_ops;
3984 
3985 			data = tbs->attr;
3986 		}
3987 
3988 		if (ops->validate) {
3989 			ret = ops->validate(tb, data, extack);
3990 			if (ret < 0)
3991 				goto put_ops;
3992 		}
3993 
3994 		if (ops->peer_type) {
3995 			peer_net = rtnl_get_peer_net(ops, tb, data, extack);
3996 			if (IS_ERR(peer_net)) {
3997 				ret = PTR_ERR(peer_net);
3998 				goto put_ops;
3999 			}
4000 			if (peer_net)
4001 				rtnl_nets_add(&rtnl_nets, peer_net);
4002 		}
4003 	}
4004 
4005 	tgt_net = rtnl_link_get_net_capable(skb, sock_net(skb->sk), tb, CAP_NET_ADMIN);
4006 	if (IS_ERR(tgt_net)) {
4007 		ret = PTR_ERR(tgt_net);
4008 		goto put_net;
4009 	}
4010 
4011 	rtnl_nets_add(&rtnl_nets, tgt_net);
4012 
4013 	if (tb[IFLA_LINK_NETNSID]) {
4014 		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
4015 
4016 		link_net = get_net_ns_by_id(tgt_net, id);
4017 		if (!link_net) {
4018 			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
4019 			ret = -EINVAL;
4020 			goto put_net;
4021 		}
4022 
4023 		rtnl_nets_add(&rtnl_nets, link_net);
4024 
4025 		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) {
4026 			ret = -EPERM;
4027 			goto put_net;
4028 		}
4029 	}
4030 
4031 	rtnl_nets_lock(&rtnl_nets);
4032 	ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack);
4033 	rtnl_nets_unlock(&rtnl_nets);
4034 
4035 put_net:
4036 	rtnl_nets_destroy(&rtnl_nets);
4037 put_ops:
4038 	if (ops)
4039 		rtnl_link_ops_put(ops, ops_srcu_index);
4040 free:
4041 	kfree(tbs);
4042 	return ret;
4043 }
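
/* Illustrative userspace sketch (not kernel code): roughly the minimal
 * RTM_NEWLINK request that reaches rtnl_newlink_create() with a "dummy"
 * device. The function name and buffer size are invented for the
 * example; the macros are the uapi ones from <linux/netlink.h>,
 * <linux/rtnetlink.h> and <linux/if_link.h>.
 *
 *	int create_dummy(void)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct ifinfomsg ifm;
 *			char buf[128];
 *		} req = { 0 };
 *		struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
 *		struct rtattr *linkinfo, *kind;
 *		int fd, err;
 *
 *		req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg));
 *		req.nlh.nlmsg_type  = RTM_NEWLINK;
 *		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK |
 *				      NLM_F_CREATE | NLM_F_EXCL;
 *
 *		// Nest IFLA_LINKINFO { IFLA_INFO_KIND = "dummy" }
 *		linkinfo = (struct rtattr *)((char *)&req +
 *					     NLMSG_ALIGN(req.nlh.nlmsg_len));
 *		linkinfo->rta_type = IFLA_LINKINFO;
 *		kind = (struct rtattr *)((char *)linkinfo + RTA_LENGTH(0));
 *		kind->rta_type = IFLA_INFO_KIND;
 *		kind->rta_len  = RTA_LENGTH(sizeof("dummy"));
 *		memcpy(RTA_DATA(kind), "dummy", sizeof("dummy"));
 *		linkinfo->rta_len = RTA_LENGTH(0) + RTA_ALIGN(kind->rta_len);
 *		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
 *				    RTA_ALIGN(linkinfo->rta_len);
 *
 *		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *		if (fd < 0)
 *			return -errno;
 *		err = sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *			     (struct sockaddr *)&kernel,
 *			     sizeof(kernel)) < 0 ? -errno : 0;
 *		close(fd);
 *		return err;
 *	}
 *
 * With no IFLA_IFNAME attribute, rtnl_newlink_create() above falls back
 * to the "dummy%d" + NET_NAME_ENUM naming path.
 */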
4044 
4045 static int rtnl_valid_getlink_req(struct sk_buff *skb,
4046 				  const struct nlmsghdr *nlh,
4047 				  struct nlattr **tb,
4048 				  struct netlink_ext_ack *extack)
4049 {
4050 	struct ifinfomsg *ifm;
4051 	int i, err;
4052 
4053 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4054 		NL_SET_ERR_MSG(extack, "Invalid header for get link");
4055 		return -EINVAL;
4056 	}
4057 
4058 	if (!netlink_strict_get_check(skb))
4059 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
4060 					      ifla_policy, extack);
4061 
4062 	ifm = nlmsg_data(nlh);
4063 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4064 	    ifm->ifi_change) {
4065 		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
4066 		return -EINVAL;
4067 	}
4068 
4069 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
4070 					    ifla_policy, extack);
4071 	if (err)
4072 		return err;
4073 
4074 	for (i = 0; i <= IFLA_MAX; i++) {
4075 		if (!tb[i])
4076 			continue;
4077 
4078 		switch (i) {
4079 		case IFLA_IFNAME:
4080 		case IFLA_ALT_IFNAME:
4081 		case IFLA_EXT_MASK:
4082 		case IFLA_TARGET_NETNSID:
4083 			break;
4084 		default:
4085 			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
4086 			return -EINVAL;
4087 		}
4088 	}
4089 
4090 	return 0;
4091 }
4092 
4093 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4094 			struct netlink_ext_ack *extack)
4095 {
4096 	struct net *net = sock_net(skb->sk);
4097 	struct net *tgt_net = net;
4098 	struct ifinfomsg *ifm;
4099 	struct nlattr *tb[IFLA_MAX+1];
4100 	struct net_device *dev = NULL;
4101 	struct sk_buff *nskb;
4102 	int netnsid = -1;
4103 	int err;
4104 	u32 ext_filter_mask = 0;
4105 
4106 	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
4107 	if (err < 0)
4108 		return err;
4109 
4110 	err = rtnl_ensure_unique_netns(tb, extack, true);
4111 	if (err < 0)
4112 		return err;
4113 
4114 	if (tb[IFLA_TARGET_NETNSID]) {
4115 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
4116 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
4117 		if (IS_ERR(tgt_net))
4118 			return PTR_ERR(tgt_net);
4119 	}
4120 
4121 	if (tb[IFLA_EXT_MASK])
4122 		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
4123 
4124 	err = -EINVAL;
4125 	ifm = nlmsg_data(nlh);
4126 	if (ifm->ifi_index > 0)
4127 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
4128 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
4129 		dev = rtnl_dev_get(tgt_net, tb);
4130 	else
4131 		goto out;
4132 
4133 	err = -ENODEV;
4134 	if (dev == NULL)
4135 		goto out;
4136 
4137 	err = -ENOBUFS;
4138 	nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
4139 	if (nskb == NULL)
4140 		goto out;
4141 
4142 	/* Synchronize the carrier state so we don't report a state
4143 	 * that we're not actually going to honour immediately; if
4144 	 * the driver just did a carrier off->on transition, we can
4145 	 * only TX if link watch work has run, but without this we'd
4146 	 * already report carrier on, even if it doesn't work yet.
4147 	 */
4148 	linkwatch_sync_dev(dev);
4149 
4150 	err = rtnl_fill_ifinfo(nskb, dev, net,
4151 			       RTM_NEWLINK, NETLINK_CB(skb).portid,
4152 			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
4153 			       0, NULL, 0, netnsid, GFP_KERNEL);
4154 	if (err < 0) {
4155 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
4156 		WARN_ON(err == -EMSGSIZE);
4157 		kfree_skb(nskb);
4158 	} else {
4159 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}
4160 out:
4161 	if (netnsid >= 0)
4162 		put_net(tgt_net);
4163 
4164 	return err;
4165 }
4166 
4167 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
4168 			   bool *changed, struct netlink_ext_ack *extack)
4169 {
4170 	char *alt_ifname;
4171 	size_t size;
4172 	int err;
4173 
4174 	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
4175 	if (err)
4176 		return err;
4177 
4178 	if (cmd == RTM_NEWLINKPROP) {
4179 		size = rtnl_prop_list_size(dev);
4180 		size += nla_total_size(ALTIFNAMSIZ);
4181 		if (size >= U16_MAX) {
4182 			NL_SET_ERR_MSG(extack,
4183 				       "effective property list too long");
4184 			return -EINVAL;
4185 		}
4186 	}
4187 
4188 	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
4189 	if (!alt_ifname)
4190 		return -ENOMEM;
4191 
4192 	if (cmd == RTM_NEWLINKPROP) {
4193 		err = netdev_name_node_alt_create(dev, alt_ifname);
4194 		if (!err)
4195 			alt_ifname = NULL;
4196 	} else if (cmd == RTM_DELLINKPROP) {
4197 		err = netdev_name_node_alt_destroy(dev, alt_ifname);
4198 	} else {
4199 		WARN_ON_ONCE(1);
4200 		err = -EINVAL;
4201 	}
4202 
4203 	kfree(alt_ifname);
4204 	if (!err)
4205 		*changed = true;
4206 	return err;
4207 }
4208 
4209 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
4210 			 struct netlink_ext_ack *extack)
4211 {
4212 	struct net *net = sock_net(skb->sk);
4213 	struct nlattr *tb[IFLA_MAX + 1];
4214 	struct net_device *dev;
4215 	struct ifinfomsg *ifm;
4216 	bool changed = false;
4217 	struct nlattr *attr;
4218 	int err, rem;
4219 
4220 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
4221 	if (err)
4222 		return err;
4223 
4224 	err = rtnl_ensure_unique_netns(tb, extack, true);
4225 	if (err)
4226 		return err;
4227 
4228 	ifm = nlmsg_data(nlh);
4229 	if (ifm->ifi_index > 0)
4230 		dev = __dev_get_by_index(net, ifm->ifi_index);
4231 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
4232 		dev = rtnl_dev_get(net, tb);
4233 	else
4234 		return -EINVAL;
4235 
4236 	if (!dev)
4237 		return -ENODEV;
4238 
4239 	if (!tb[IFLA_PROP_LIST])
4240 		return 0;
4241 
4242 	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
4243 		switch (nla_type(attr)) {
4244 		case IFLA_ALT_IFNAME:
4245 			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
4246 			if (err)
4247 				return err;
4248 			break;
4249 		}
4250 	}
4251 
4252 	if (changed)
4253 		netdev_state_change(dev);
4254 	return 0;
4255 }
4256 
4257 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
4258 			    struct netlink_ext_ack *extack)
4259 {
4260 	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
4261 }
4262 
4263 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
4264 			    struct netlink_ext_ack *extack)
4265 {
4266 	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
4267 }
4268 
4269 static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
4270 					  struct nlmsghdr *nlh)
4271 {
4272 	struct net *net = sock_net(skb->sk);
4273 	size_t min_ifinfo_dump_size = 0;
4274 	u32 ext_filter_mask = 0;
4275 	struct net_device *dev;
4276 	struct nlattr *nla;
4277 	int hdrlen, rem;
4278 
4279 	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
4280 	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
4281 		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
4282 
4283 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
4284 		return NLMSG_GOODSIZE;
4285 
4286 	nla_for_each_attr_type(nla, IFLA_EXT_MASK,
4287 			       nlmsg_attrdata(nlh, hdrlen),
4288 			       nlmsg_attrlen(nlh, hdrlen), rem) {
4289 		if (nla_len(nla) == sizeof(u32))
4290 			ext_filter_mask = nla_get_u32(nla);
4291 	}
4292 
4293 	if (!ext_filter_mask)
4294 		return NLMSG_GOODSIZE;
4295 	/* Traverse the list of net devices and compute the minimum
4296 	 * buffer size based upon the filter mask.
4297 	 */
4299 	rcu_read_lock();
4300 	for_each_netdev_rcu(net, dev) {
4301 		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
4302 					   if_nlmsg_size(dev, ext_filter_mask));
4303 	}
4304 	rcu_read_unlock();
4305 
4306 	return nlmsg_total_size(min_ifinfo_dump_size);
4307 }
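
/* Note: userspace opts in to the larger per-device dump records by
 * adding IFLA_EXT_MASK (e.g. RTEXT_FILTER_VF) to the GETLINK request;
 * without it, rtnl_calcit() above settles for the NLMSG_GOODSIZE
 * default.
 */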
4308 
4309 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
4310 {
4311 	int idx;
4312 	int s_idx = cb->family;
4313 	int type = cb->nlh->nlmsg_type - RTM_BASE;
4314 	int ret = 0;
4315 
4316 	if (s_idx == 0)
4317 		s_idx = 1;
4318 
4319 	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
4320 		struct rtnl_link __rcu **tab;
4321 		struct rtnl_link *link;
4322 		rtnl_dumpit_func dumpit;
4323 
4324 		if (idx < s_idx || idx == PF_PACKET)
4325 			continue;
4326 
4327 		if (type < 0 || type >= RTM_NR_MSGTYPES)
4328 			continue;
4329 
4330 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
4331 		if (!tab)
4332 			continue;
4333 
4334 		link = rcu_dereference_rtnl(tab[type]);
4335 		if (!link)
4336 			continue;
4337 
4338 		dumpit = link->dumpit;
4339 		if (!dumpit)
4340 			continue;
4341 
4342 		if (idx > s_idx) {
4343 			memset(&cb->args[0], 0, sizeof(cb->args));
4344 			cb->prev_seq = 0;
4345 			cb->seq = 0;
4346 		}
4347 		ret = dumpit(skb, cb);
4348 		if (ret)
4349 			break;
4350 	}
4351 	cb->family = idx;
4352 
4353 	return skb->len ? : ret;
4354 }
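
/* Note: the "skb->len ? : ret" idiom above returns the skb length
 * whenever anything was written, so netlink delivers the partial dump
 * and calls back in; an error in ret only surfaces once the skb would
 * otherwise be empty.
 */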
4355 
4356 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
4357 				       unsigned int change,
4358 				       u32 event, gfp_t flags, int *new_nsid,
4359 				       int new_ifindex, u32 portid,
4360 				       const struct nlmsghdr *nlh)
4361 {
4362 	struct net *net = dev_net(dev);
4363 	struct sk_buff *skb;
4364 	int err = -ENOBUFS;
4365 	u32 seq = 0;
4366 
4367 	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
4368 	if (skb == NULL)
4369 		goto errout;
4370 
4371 	if (nlmsg_report(nlh))
4372 		seq = nlmsg_seq(nlh);
4373 	else
4374 		portid = 0;
4375 
4376 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
4377 			       type, portid, seq, change, 0, 0, event,
4378 			       new_nsid, new_ifindex, -1, flags);
4379 	if (err < 0) {
4380 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
4381 		WARN_ON(err == -EMSGSIZE);
4382 		kfree_skb(skb);
4383 		goto errout;
4384 	}
4385 	return skb;
4386 errout:
4387 	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4388 	return NULL;
4389 }
4390 
4391 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4392 		       u32 portid, const struct nlmsghdr *nlh)
4393 {
4394 	struct net *net = dev_net(dev);
4395 
4396 	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4397 }
4398 
4399 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4400 			       unsigned int change, u32 event,
4401 			       gfp_t flags, int *new_nsid, int new_ifindex,
4402 			       u32 portid, const struct nlmsghdr *nlh)
4403 {
4404 	struct sk_buff *skb;
4405 
4406 	if (dev->reg_state != NETREG_REGISTERED)
4407 		return;
4408 
4409 	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4410 				     new_ifindex, portid, nlh);
4411 	if (skb)
4412 		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4413 }
4414 
4415 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4416 		  gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4417 {
4418 	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4419 			   NULL, 0, portid, nlh);
4420 }
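
/* rtmsg_ifinfo() is the usual entry point for link-change broadcasts.
 * A caller that just toggled IFF_UP would do something like (sketch):
 *
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP, GFP_KERNEL, 0, NULL);
 *
 * where the change argument is the mask of ifi_flags bits that changed.
 */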
4421 
4422 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4423 			 gfp_t flags, int *new_nsid, int new_ifindex)
4424 {
4425 	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4426 			   new_nsid, new_ifindex, 0, NULL);
4427 }
4428 
4429 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4430 				   struct net_device *dev,
4431 				   u8 *addr, u16 vid, u32 pid, u32 seq,
4432 				   int type, unsigned int flags,
4433 				   int nlflags, u16 ndm_state)
4434 {
4435 	struct nlmsghdr *nlh;
4436 	struct ndmsg *ndm;
4437 
4438 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4439 	if (!nlh)
4440 		return -EMSGSIZE;
4441 
4442 	ndm = nlmsg_data(nlh);
4443 	ndm->ndm_family  = AF_BRIDGE;
4444 	ndm->ndm_pad1	 = 0;
4445 	ndm->ndm_pad2    = 0;
4446 	ndm->ndm_flags	 = flags;
4447 	ndm->ndm_type	 = 0;
4448 	ndm->ndm_ifindex = dev->ifindex;
4449 	ndm->ndm_state   = ndm_state;
4450 
4451 	if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4452 		goto nla_put_failure;
4453 	if (vid)
4454 		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4455 			goto nla_put_failure;
4456 
4457 	nlmsg_end(skb, nlh);
4458 	return 0;
4459 
4460 nla_put_failure:
4461 	nlmsg_cancel(skb, nlh);
4462 	return -EMSGSIZE;
4463 }
4464 
4465 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4466 {
4467 	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4468 	       nla_total_size(dev->addr_len) +	/* NDA_LLADDR */
4469 	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
4470 	       0;
4471 }
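
/* For illustration, with addr_len == ETH_ALEN the estimate above is
 *	NLMSG_ALIGN(12) + nla_total_size(6) + nla_total_size(2)
 *	  = 12 + 12 + 8 = 32 bytes
 * per notification: each nla_total_size() is the 4-byte nlattr header
 * plus the payload, padded to a 4-byte boundary.
 */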
4472 
4473 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4474 			    u16 ndm_state)
4475 {
4476 	struct net *net = dev_net(dev);
4477 	struct sk_buff *skb;
4478 	int err = -ENOBUFS;
4479 
4480 	skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4481 	if (!skb)
4482 		goto errout;
4483 
4484 	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4485 				      0, 0, type, NTF_SELF, 0, ndm_state);
4486 	if (err < 0) {
4487 		kfree_skb(skb);
4488 		goto errout;
4489 	}
4490 
4491 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4492 	return;
4493 errout:
4494 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4495 }
4496 
4497 /*
4498  * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4499  */
4500 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4501 		     struct nlattr *tb[],
4502 		     struct net_device *dev,
4503 		     const unsigned char *addr, u16 vid,
4504 		     u16 flags)
4505 {
4506 	int err = -EINVAL;
4507 
4508 	/* If aging addresses are supported, the device will need to
4509 	 * implement its own handler for this.
4510 	 */
4511 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4512 		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4513 		return err;
4514 	}
4515 
4516 	if (tb[NDA_FLAGS_EXT]) {
4517 		netdev_info(dev, "invalid flags given to default FDB implementation\n");
4518 		return err;
4519 	}
4520 
4521 	if (vid) {
4522 		netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4523 		return err;
4524 	}
4525 
4526 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4527 		err = dev_uc_add_excl(dev, addr);
4528 	else if (is_multicast_ether_addr(addr))
4529 		err = dev_mc_add_excl(dev, addr);
4530 
4531 	/* Only return duplicate errors if NLM_F_EXCL is set */
4532 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
4533 		err = 0;
4534 
4535 	return err;
4536 }
4537 EXPORT_SYMBOL(ndo_dflt_fdb_add);
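
/* Note: the default above is what a request such as
 *	bridge fdb add 01:00:5e:00:00:42 dev eth0 self permanent
 * (iproute2 syntax, for illustration) ends up using when the driver has
 * no ndo_fdb_add of its own: the address simply joins the device's
 * uc/mc address filter via dev_uc_add_excl()/dev_mc_add_excl().
 */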
4538 
4539 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4540 			 struct netlink_ext_ack *extack)
4541 {
4542 	u16 vid = 0;
4543 
4544 	if (vlan_attr) {
4545 		if (nla_len(vlan_attr) != sizeof(u16)) {
4546 			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4547 			return -EINVAL;
4548 		}
4549 
4550 		vid = nla_get_u16(vlan_attr);
4551 
4552 		if (!vid || vid >= VLAN_VID_MASK) {
4553 			NL_SET_ERR_MSG(extack, "invalid vlan id");
4554 			return -EINVAL;
4555 		}
4556 	}
4557 	*p_vid = vid;
4558 	return 0;
4559 }
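
/* Note: when NDA_VLAN is present it must carry 1..4094 - both 0 ("no
 * VLAN") and 4095 (VLAN_VID_MASK, reserved) are rejected above; an
 * absent attribute simply yields vid 0.
 */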
4560 
4561 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4562 			struct netlink_ext_ack *extack)
4563 {
4564 	struct net *net = sock_net(skb->sk);
4565 	struct ndmsg *ndm;
4566 	struct nlattr *tb[NDA_MAX+1];
4567 	struct net_device *dev;
4568 	u8 *addr;
4569 	u16 vid;
4570 	int err;
4571 
4572 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4573 				     extack);
4574 	if (err < 0)
4575 		return err;
4576 
4577 	ndm = nlmsg_data(nlh);
4578 	if (ndm->ndm_ifindex == 0) {
4579 		NL_SET_ERR_MSG(extack, "invalid ifindex");
4580 		return -EINVAL;
4581 	}
4582 
4583 	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4584 	if (dev == NULL) {
4585 		NL_SET_ERR_MSG(extack, "unknown ifindex");
4586 		return -ENODEV;
4587 	}
4588 
4589 	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4590 		NL_SET_ERR_MSG(extack, "invalid address");
4591 		return -EINVAL;
4592 	}
4593 
4594 	if (dev->type != ARPHRD_ETHER) {
4595 		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4596 		return -EINVAL;
4597 	}
4598 
4599 	addr = nla_data(tb[NDA_LLADDR]);
4600 
4601 	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4602 	if (err)
4603 		return err;
4604 
4605 	err = -EOPNOTSUPP;
4606 
4607 	/* Support fdb on the master device, the net/bridge default case */
4608 	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4609 	    netif_is_bridge_port(dev)) {
4610 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4611 		const struct net_device_ops *ops = br_dev->netdev_ops;
4612 		bool notified = false;
4613 
4614 		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4615 				       nlh->nlmsg_flags, &notified, extack);
4616 		if (err)
4617 			goto out;
4618 
4619 		ndm->ndm_flags &= ~NTF_MASTER;
4620 	}
4621 
4622 	/* Embedded bridge, macvlan, and any other device support */
4623 	if ((ndm->ndm_flags & NTF_SELF)) {
4624 		bool notified = false;
4625 
4626 		if (dev->netdev_ops->ndo_fdb_add)
4627 			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4628 							   vid,
4629 							   nlh->nlmsg_flags,
4630 							   &notified, extack);
4631 		else
4632 			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4633 					       nlh->nlmsg_flags);
4634 
4635 		if (!err && !notified) {
4636 			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4637 					ndm->ndm_state);
4638 			ndm->ndm_flags &= ~NTF_SELF;
4639 		}
4640 	}
4641 out:
4642 	return err;
4643 }
4644 
4645 /*
4646  * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4647  */
4648 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4649 		     struct nlattr *tb[],
4650 		     struct net_device *dev,
4651 		     const unsigned char *addr, u16 vid)
4652 {
4653 	int err = -EINVAL;
4654 
4655 	/* If aging addresses are supported, the device will need to
4656 	 * implement its own handler for this.
4657 	 */
4658 	if (!(ndm->ndm_state & NUD_PERMANENT)) {
4659 		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4660 		return err;
4661 	}
4662 
4663 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4664 		err = dev_uc_del(dev, addr);
4665 	else if (is_multicast_ether_addr(addr))
4666 		err = dev_mc_del(dev, addr);
4667 
4668 	return err;
4669 }
4670 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4671 
4672 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4673 			struct netlink_ext_ack *extack)
4674 {
4675 	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4676 	struct net *net = sock_net(skb->sk);
4677 	const struct net_device_ops *ops;
4678 	struct ndmsg *ndm;
4679 	struct nlattr *tb[NDA_MAX+1];
4680 	struct net_device *dev;
4681 	__u8 *addr = NULL;
4682 	int err;
4683 	u16 vid;
4684 
4685 	if (!netlink_capable(skb, CAP_NET_ADMIN))
4686 		return -EPERM;
4687 
4688 	if (!del_bulk) {
4689 		err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4690 					     NULL, extack);
4691 	} else {
4692 		/* For bulk delete, the drivers will parse the message with
4693 		 * their own policy.
4694 		 */
4695 		err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
4696 	}
4697 	if (err < 0)
4698 		return err;
4699 
4700 	ndm = nlmsg_data(nlh);
4701 	if (ndm->ndm_ifindex == 0) {
4702 		NL_SET_ERR_MSG(extack, "invalid ifindex");
4703 		return -EINVAL;
4704 	}
4705 
4706 	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4707 	if (dev == NULL) {
4708 		NL_SET_ERR_MSG(extack, "unknown ifindex");
4709 		return -ENODEV;
4710 	}
4711 
4712 	if (!del_bulk) {
4713 		if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4714 			NL_SET_ERR_MSG(extack, "invalid address");
4715 			return -EINVAL;
4716 		}
4717 		addr = nla_data(tb[NDA_LLADDR]);
4718 
4719 		err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4720 		if (err)
4721 			return err;
4722 	}
4723 
4724 	if (dev->type != ARPHRD_ETHER) {
4725 		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4726 		return -EINVAL;
4727 	}
4728 
4729 	err = -EOPNOTSUPP;
4730 
4731 	/* Support fdb on the master device, the net/bridge default case */
4732 	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4733 	    netif_is_bridge_port(dev)) {
4734 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4735 		bool notified = false;
4736 
4737 		ops = br_dev->netdev_ops;
4738 		if (!del_bulk) {
4739 			if (ops->ndo_fdb_del)
4740 				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
4741 						       &notified, extack);
4742 		} else {
4743 			if (ops->ndo_fdb_del_bulk)
4744 				err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4745 		}
4746 
4747 		if (err)
4748 			goto out;
4749 
4750 		ndm->ndm_flags &= ~NTF_MASTER;
4751 	}
4752 
4753 	/* Embedded bridge, macvlan, and any other device support */
4754 	if (ndm->ndm_flags & NTF_SELF) {
4755 		bool notified = false;
4756 
4757 		ops = dev->netdev_ops;
4758 		if (!del_bulk) {
4759 			if (ops->ndo_fdb_del)
4760 				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
4761 						       &notified, extack);
4762 			else
4763 				err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4764 		} else {
4765 			/* in case err was cleared by the NTF_MASTER call above */
4766 			err = -EOPNOTSUPP;
4767 			if (ops->ndo_fdb_del_bulk)
4768 				err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4769 		}
4770 
4771 		if (!err) {
4772 			if (!del_bulk && !notified)
4773 				rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4774 						ndm->ndm_state);
4775 			ndm->ndm_flags &= ~NTF_SELF;
4776 		}
4777 	}
4778 out:
4779 	return err;
4780 }
4781 
4782 static int nlmsg_populate_fdb(struct sk_buff *skb,
4783 			      struct netlink_callback *cb,
4784 			      struct net_device *dev,
4785 			      int *idx,
4786 			      struct netdev_hw_addr_list *list)
4787 {
4788 	struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
4789 	struct netdev_hw_addr *ha;
4790 	u32 portid, seq;
4791 	int err;
4792 
4793 	portid = NETLINK_CB(cb->skb).portid;
4794 	seq = cb->nlh->nlmsg_seq;
4795 
4796 	list_for_each_entry(ha, &list->list, list) {
4797 		if (*idx < ctx->fdb_idx)
4798 			goto skip;
4799 
4800 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4801 					      portid, seq,
4802 					      RTM_NEWNEIGH, NTF_SELF,
4803 					      NLM_F_MULTI, NUD_PERMANENT);
4804 		if (err < 0)
4805 			return err;
4806 skip:
4807 		*idx += 1;
4808 	}
4809 	return 0;
4810 }
4811 
4812 /**
4813  * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4814  * @skb: socket buffer to store message in
4815  * @cb: netlink callback
4816  * @dev: netdevice
4817  * @filter_dev: ignored
4818  * @idx: the number of FDB table entries dumped is added to *@idx
4819  *
4820  * Default netdevice operation to dump the existing unicast address list.
4821  * Returns number of addresses from list put in skb.
4822 	 * Returns the number of addresses from the list that were put in skb.
4823 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4824 		      struct netlink_callback *cb,
4825 		      struct net_device *dev,
4826 		      struct net_device *filter_dev,
4827 		      int *idx)
4828 {
4829 	int err;
4830 
4831 	if (dev->type != ARPHRD_ETHER)
4832 		return -EINVAL;
4833 
4834 	netif_addr_lock_bh(dev);
4835 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4836 	if (err)
4837 		goto out;
4838 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4839 out:
4840 	netif_addr_unlock_bh(dev);
4841 	return err;
4842 }
4843 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
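
/* A driver that keeps no private FDB may simply leave ndo_fdb_dump
 * unset - rtnl_fdb_dump() below falls back to ndo_dflt_fdb_dump()
 * itself - or wire it up explicitly (hypothetical driver shown for
 * illustration):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};
 */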
4844 
4845 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4846 				 int *br_idx, int *brport_idx,
4847 				 struct netlink_ext_ack *extack)
4848 {
4849 	struct nlattr *tb[NDA_MAX + 1];
4850 	struct ndmsg *ndm;
4851 	int err, i;
4852 
4853 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4854 		NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4855 		return -EINVAL;
4856 	}
4857 
4858 	ndm = nlmsg_data(nlh);
4859 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4860 	    ndm->ndm_flags || ndm->ndm_type) {
4861 		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4862 		return -EINVAL;
4863 	}
4864 
4865 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4866 					    NDA_MAX, NULL, extack);
4867 	if (err < 0)
4868 		return err;
4869 
4870 	*brport_idx = ndm->ndm_ifindex;
4871 	for (i = 0; i <= NDA_MAX; ++i) {
4872 		if (!tb[i])
4873 			continue;
4874 
4875 		switch (i) {
4876 		case NDA_IFINDEX:
4877 			if (nla_len(tb[i]) != sizeof(u32)) {
4878 				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4879 				return -EINVAL;
4880 			}
4881 			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4882 			break;
4883 		case NDA_MASTER:
4884 			if (nla_len(tb[i]) != sizeof(u32)) {
4885 				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4886 				return -EINVAL;
4887 			}
4888 			*br_idx = nla_get_u32(tb[NDA_MASTER]);
4889 			break;
4890 		default:
4891 			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4892 			return -EINVAL;
4893 		}
4894 	}
4895 
4896 	return 0;
4897 }
4898 
4899 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4900 				 int *br_idx, int *brport_idx,
4901 				 struct netlink_ext_ack *extack)
4902 {
4903 	struct nlattr *tb[IFLA_MAX+1];
4904 	int err;
4905 
4906 	/* A hack to preserve kernel<->userspace interface.
4907 	 * Before Linux v4.12 this code accepted the ndmsg header that iproute2
4908 	 * has sent since v3.3.0; ndmsg is shorter than ifinfomsg, thus
 	 * nlmsg_parse() bails on it.
4909 	 * So, check for ndmsg with an optional u32 attribute (not used here).
4910 	 * Fortunately these sizes don't conflict with the size of ifinfomsg
4911 	 * with an optional attribute.
4912 	 */
4913 	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4914 	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4915 	     nla_attr_size(sizeof(u32)))) {
4916 		struct ifinfomsg *ifm;
4917 
4918 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4919 					     tb, IFLA_MAX, ifla_policy,
4920 					     extack);
4921 		if (err < 0) {
4922 			return -EINVAL;
4923 		} else if (err == 0) {
4924 			if (tb[IFLA_MASTER])
4925 				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
4926 		}
4927 
4928 		ifm = nlmsg_data(nlh);
4929 		*brport_idx = ifm->ifi_index;
4930 	}
4931 	return 0;
4932 }
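
/* For illustration, on the usual ABI the sizes above work out so that a
 * bare ndmsg is 12 bytes, ndmsg plus the optional u32 attribute is
 * 12 + nla_attr_size(4) == 20 bytes, and ifinfomsg is 16 bytes - the
 * payload length alone tells the two header layouts apart.
 */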
4933 
4934 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4935 {
4936 	const struct net_device_ops *ops = NULL, *cops = NULL;
4937 	struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
4938 	struct net_device *dev, *br_dev = NULL;
4939 	struct net *net = sock_net(skb->sk);
4940 	int brport_idx = 0;
4941 	int br_idx = 0;
4942 	int fidx = 0;
4943 	int err;
4944 
4945 	NL_ASSERT_CTX_FITS(struct ndo_fdb_dump_context);
4946 
4947 	if (cb->strict_check)
4948 		err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4949 					    cb->extack);
4950 	else
4951 		err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4952 					    cb->extack);
4953 	if (err < 0)
4954 		return err;
4955 
4956 	if (br_idx) {
4957 		br_dev = __dev_get_by_index(net, br_idx);
4958 		if (!br_dev)
4959 			return -ENODEV;
4960 
4961 		ops = br_dev->netdev_ops;
4962 	}
4963 
4964 	for_each_netdev_dump(net, dev, ctx->ifindex) {
4965 		if (brport_idx && (dev->ifindex != brport_idx))
4966 			continue;
4967 
4968 		if (!br_idx) { /* user did not specify a particular bridge */
4969 			if (netif_is_bridge_port(dev)) {
4970 				br_dev = netdev_master_upper_dev_get(dev);
4971 				cops = br_dev->netdev_ops;
4972 			}
4973 		} else {
4974 			if (dev != br_dev &&
4975 			    !netif_is_bridge_port(dev))
4976 				continue;
4977 
4978 			if (br_dev != netdev_master_upper_dev_get(dev) &&
4979 			    !netif_is_bridge_master(dev))
4980 				continue;
4981 			cops = ops;
4982 		}
4983 
4984 		if (netif_is_bridge_port(dev)) {
4985 			if (cops && cops->ndo_fdb_dump) {
4986 				err = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
4987 							&fidx);
4988 				if (err == -EMSGSIZE)
4989 					break;
4990 			}
4991 		}
4992 
4993 		if (dev->netdev_ops->ndo_fdb_dump)
4994 			err = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
4995 							    &fidx);
4996 		else
4997 			err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, &fidx);
4998 		if (err == -EMSGSIZE)
4999 			break;
5000 
5001 		cops = NULL;
5002 
5003 		/* reset fdb offset to 0 for the rest of the interfaces */
5004 		ctx->fdb_idx = 0;
5005 		fidx = 0;
5006 	}
5007 
5008 	ctx->fdb_idx = fidx;
5009 
5010 	return skb->len;
5011 }
5012 
5013 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
5014 				struct nlattr **tb, u8 *ndm_flags,
5015 				int *br_idx, int *brport_idx, u8 **addr,
5016 				u16 *vid, struct netlink_ext_ack *extack)
5017 {
5018 	struct ndmsg *ndm;
5019 	int err, i;
5020 
5021 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
5022 		NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
5023 		return -EINVAL;
5024 	}
5025 
5026 	ndm = nlmsg_data(nlh);
5027 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
5028 	    ndm->ndm_type) {
5029 		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
5030 		return -EINVAL;
5031 	}
5032 
5033 	if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
5034 		NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
5035 		return -EINVAL;
5036 	}
5037 
5038 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
5039 					    NDA_MAX, nda_policy, extack);
5040 	if (err < 0)
5041 		return err;
5042 
5043 	*ndm_flags = ndm->ndm_flags;
5044 	*brport_idx = ndm->ndm_ifindex;
5045 	for (i = 0; i <= NDA_MAX; ++i) {
5046 		if (!tb[i])
5047 			continue;
5048 
5049 		switch (i) {
5050 		case NDA_MASTER:
5051 			*br_idx = nla_get_u32(tb[i]);
5052 			break;
5053 		case NDA_LLADDR:
5054 			if (nla_len(tb[i]) != ETH_ALEN) {
5055 				NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
5056 				return -EINVAL;
5057 			}
5058 			*addr = nla_data(tb[i]);
5059 			break;
5060 		case NDA_VLAN:
5061 			err = fdb_vid_parse(tb[i], vid, extack);
5062 			if (err)
5063 				return err;
5064 			break;
5065 		case NDA_VNI:
5066 			break;
5067 		default:
5068 			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
5069 			return -EINVAL;
5070 		}
5071 	}
5072 
5073 	return 0;
5074 }
5075 
5076 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5077 			struct netlink_ext_ack *extack)
5078 {
5079 	struct net_device *dev = NULL, *br_dev = NULL;
5080 	const struct net_device_ops *ops = NULL;
5081 	struct net *net = sock_net(in_skb->sk);
5082 	struct nlattr *tb[NDA_MAX + 1];
5083 	struct sk_buff *skb;
5084 	int brport_idx = 0;
5085 	u8 ndm_flags = 0;
5086 	int br_idx = 0;
5087 	u8 *addr = NULL;
5088 	u16 vid = 0;
5089 	int err;
5090 
5091 	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
5092 				   &brport_idx, &addr, &vid, extack);
5093 	if (err < 0)
5094 		return err;
5095 
5096 	if (!addr) {
5097 		NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
5098 		return -EINVAL;
5099 	}
5100 
5101 	if (brport_idx) {
5102 		dev = __dev_get_by_index(net, brport_idx);
5103 		if (!dev) {
5104 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
5105 			return -ENODEV;
5106 		}
5107 	}
5108 
5109 	if (br_idx) {
5110 		if (dev) {
5111 			NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
5112 			return -EINVAL;
5113 		}
5114 
5115 		br_dev = __dev_get_by_index(net, br_idx);
5116 		if (!br_dev) {
5117 			NL_SET_ERR_MSG(extack, "Invalid master ifindex");
5118 			return -EINVAL;
5119 		}
5120 		ops = br_dev->netdev_ops;
5121 	}
5122 
5123 	if (dev) {
5124 		if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
5125 			if (!netif_is_bridge_port(dev)) {
5126 				NL_SET_ERR_MSG(extack, "Device is not a bridge port");
5127 				return -EINVAL;
5128 			}
5129 			br_dev = netdev_master_upper_dev_get(dev);
5130 			if (!br_dev) {
5131 				NL_SET_ERR_MSG(extack, "Master of device not found");
5132 				return -EINVAL;
5133 			}
5134 			ops = br_dev->netdev_ops;
5135 		} else {
5136 			if (!(ndm_flags & NTF_SELF)) {
5137 				NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
5138 				return -EINVAL;
5139 			}
5140 			ops = dev->netdev_ops;
5141 		}
5142 	}
5143 
5144 	if (!br_dev && !dev) {
5145 		NL_SET_ERR_MSG(extack, "No device specified");
5146 		return -ENODEV;
5147 	}
5148 
5149 	if (!ops || !ops->ndo_fdb_get) {
5150 		NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
5151 		return -EOPNOTSUPP;
5152 	}
5153 
5154 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
5155 	if (!skb)
5156 		return -ENOBUFS;
5157 
5158 	if (br_dev)
5159 		dev = br_dev;
5160 	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
5161 			       NETLINK_CB(in_skb).portid,
5162 			       nlh->nlmsg_seq, extack);
5163 	if (err)
5164 		goto out;
5165 
5166 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5167 out:
5168 	kfree_skb(skb);
5169 	return err;
5170 }
5171 
5172 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
5173 			       unsigned int attrnum, unsigned int flag)
5174 {
5175 	if (mask & flag)
5176 		return nla_put_u8(skb, attrnum, !!(flags & flag));
5177 	return 0;
5178 }
5179 
5180 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5181 			    struct net_device *dev, u16 mode,
5182 			    u32 flags, u32 mask, int nlflags,
5183 			    u32 filter_mask,
5184 			    int (*vlan_fill)(struct sk_buff *skb,
5185 					     struct net_device *dev,
5186 					     u32 filter_mask))
5187 {
5188 	struct nlmsghdr *nlh;
5189 	struct ifinfomsg *ifm;
5190 	struct nlattr *br_afspec;
5191 	struct nlattr *protinfo;
5192 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
5193 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5194 	int err = 0;
5195 
5196 	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
5197 	if (nlh == NULL)
5198 		return -EMSGSIZE;
5199 
5200 	ifm = nlmsg_data(nlh);
5201 	ifm->ifi_family = AF_BRIDGE;
5202 	ifm->__ifi_pad = 0;
5203 	ifm->ifi_type = dev->type;
5204 	ifm->ifi_index = dev->ifindex;
5205 	ifm->ifi_flags = dev_get_flags(dev);
5206 	ifm->ifi_change = 0;
5207 
5209 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5210 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5211 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
5212 	    (br_dev &&
5213 	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
5214 	    (dev->addr_len &&
5215 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5216 	    (dev->ifindex != dev_get_iflink(dev) &&
5217 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
5218 		goto nla_put_failure;
5219 
5220 	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
5221 	if (!br_afspec)
5222 		goto nla_put_failure;
5223 
5224 	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
5225 		nla_nest_cancel(skb, br_afspec);
5226 		goto nla_put_failure;
5227 	}
5228 
5229 	if (mode != BRIDGE_MODE_UNDEF) {
5230 		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
5231 			nla_nest_cancel(skb, br_afspec);
5232 			goto nla_put_failure;
5233 		}
5234 	}
5235 	if (vlan_fill) {
5236 		err = vlan_fill(skb, dev, filter_mask);
5237 		if (err) {
5238 			nla_nest_cancel(skb, br_afspec);
5239 			goto nla_put_failure;
5240 		}
5241 	}
5242 	nla_nest_end(skb, br_afspec);
5243 
5244 	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
5245 	if (!protinfo)
5246 		goto nla_put_failure;
5247 
5248 	if (brport_nla_put_flag(skb, flags, mask,
5249 				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
5250 	    brport_nla_put_flag(skb, flags, mask,
5251 				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
5252 	    brport_nla_put_flag(skb, flags, mask,
5253 				IFLA_BRPORT_FAST_LEAVE,
5254 				BR_MULTICAST_FAST_LEAVE) ||
5255 	    brport_nla_put_flag(skb, flags, mask,
5256 				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
5257 	    brport_nla_put_flag(skb, flags, mask,
5258 				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
5259 	    brport_nla_put_flag(skb, flags, mask,
5260 				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
5261 	    brport_nla_put_flag(skb, flags, mask,
5262 				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
5263 	    brport_nla_put_flag(skb, flags, mask,
5264 				IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
5265 	    brport_nla_put_flag(skb, flags, mask,
5266 				IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
5267 	    brport_nla_put_flag(skb, flags, mask,
5268 				IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
5269 		nla_nest_cancel(skb, protinfo);
5270 		goto nla_put_failure;
5271 	}
5272 
5273 	nla_nest_end(skb, protinfo);
5274 
5275 	nlmsg_end(skb, nlh);
5276 	return 0;
5277 nla_put_failure:
5278 	nlmsg_cancel(skb, nlh);
5279 	return err ? err : -EMSGSIZE;
5280 }
5281 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
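
/* Drivers with an embedded hardware bridge typically call the helper
 * above from their own ndo_bridge_getlink, e.g. (hypothetical driver,
 * with foo_get_bridge_mode() standing in for however the driver tracks
 * BRIDGE_MODE_VEB vs. BRIDGE_MODE_VEPA):
 *
 *	static int foo_bridge_getlink(struct sk_buff *skb, u32 pid,
 *				      u32 seq, struct net_device *dev,
 *				      u32 filter_mask, int nlflags)
 *	{
 *		u16 mode = foo_get_bridge_mode(dev);
 *
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
 *					       0, 0, nlflags, filter_mask,
 *					       NULL);
 *	}
 */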
5282 
5283 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
5284 				    bool strict_check, u32 *filter_mask,
5285 				    struct netlink_ext_ack *extack)
5286 {
5287 	struct nlattr *tb[IFLA_MAX+1];
5288 	int err, i;
5289 
5290 	if (strict_check) {
5291 		struct ifinfomsg *ifm;
5292 
5293 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5294 			NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
5295 			return -EINVAL;
5296 		}
5297 
5298 		ifm = nlmsg_data(nlh);
5299 		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5300 		    ifm->ifi_change || ifm->ifi_index) {
5301 			NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
5302 			return -EINVAL;
5303 		}
5304 
5305 		err = nlmsg_parse_deprecated_strict(nlh,
5306 						    sizeof(struct ifinfomsg),
5307 						    tb, IFLA_MAX, ifla_policy,
5308 						    extack);
5309 	} else {
5310 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5311 					     tb, IFLA_MAX, ifla_policy,
5312 					     extack);
5313 	}
5314 	if (err < 0)
5315 		return err;
5316 
5317 	/* new attributes should only be added with strict checking */
5318 	for (i = 0; i <= IFLA_MAX; ++i) {
5319 		if (!tb[i])
5320 			continue;
5321 
5322 		switch (i) {
5323 		case IFLA_EXT_MASK:
5324 			*filter_mask = nla_get_u32(tb[i]);
5325 			break;
5326 		default:
5327 			if (strict_check) {
5328 				NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5329 				return -EINVAL;
5330 			}
5331 		}
5332 	}
5333 
5334 	return 0;
5335 }
5336 
5337 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5338 {
5339 	const struct nlmsghdr *nlh = cb->nlh;
5340 	struct net *net = sock_net(skb->sk);
5341 	struct net_device *dev;
5342 	int idx = 0;
5343 	u32 portid = NETLINK_CB(cb->skb).portid;
5344 	u32 seq = nlh->nlmsg_seq;
5345 	u32 filter_mask = 0;
5346 	int err;
5347 
5348 	err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5349 				       cb->extack);
5350 	if (err < 0 && cb->strict_check)
5351 		return err;
5352 
5353 	rcu_read_lock();
5354 	for_each_netdev_rcu(net, dev) {
5355 		const struct net_device_ops *ops = dev->netdev_ops;
5356 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5357 
5358 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
5359 			if (idx >= cb->args[0]) {
5360 				err = br_dev->netdev_ops->ndo_bridge_getlink(
5361 						skb, portid, seq, dev,
5362 						filter_mask, NLM_F_MULTI);
5363 				if (err < 0 && err != -EOPNOTSUPP) {
5364 					if (likely(skb->len))
5365 						break;
5366 
5367 					goto out_err;
5368 				}
5369 			}
5370 			idx++;
5371 		}
5372 
5373 		if (ops->ndo_bridge_getlink) {
5374 			if (idx >= cb->args[0]) {
5375 				err = ops->ndo_bridge_getlink(skb, portid,
5376 							      seq, dev,
5377 							      filter_mask,
5378 							      NLM_F_MULTI);
5379 				if (err < 0 && err != -EOPNOTSUPP) {
5380 					if (likely(skb->len))
5381 						break;
5382 
5383 					goto out_err;
5384 				}
5385 			}
5386 			idx++;
5387 		}
5388 	}
5389 	err = skb->len;
5390 out_err:
5391 	rcu_read_unlock();
5392 	cb->args[0] = idx;
5393 
5394 	return err;
5395 }
5396 
5397 static inline size_t bridge_nlmsg_size(void)
5398 {
5399 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5400 		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
5401 		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
5402 		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
5403 		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
5404 		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
5405 		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
5406 		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
5407 		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
5408 		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
5409 		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
5410 }
5411 
5412 static int rtnl_bridge_notify(struct net_device *dev)
5413 {
5414 	struct net *net = dev_net(dev);
5415 	struct sk_buff *skb;
5416 	int err = -EOPNOTSUPP;
5417 
5418 	if (!dev->netdev_ops->ndo_bridge_getlink)
5419 		return 0;
5420 
5421 	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5422 	if (!skb) {
5423 		err = -ENOMEM;
5424 		goto errout;
5425 	}
5426 
5427 	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5428 	if (err < 0)
5429 		goto errout;
5430 
5431 	/* Notification info is only filled for bridge ports, not the bridge
5432 	 * device itself. Therefore, a zero notification length is valid and
5433 	 * should not result in an error.
5434 	 */
5435 	if (!skb->len)
5436 		goto errout;
5437 
5438 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5439 	return 0;
5440 errout:
5441 	WARN_ON(err == -EMSGSIZE);
5442 	kfree_skb(skb);
5443 	if (err)
5444 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5445 	return err;
5446 }
5447 
5448 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5449 			       struct netlink_ext_ack *extack)
5450 {
5451 	struct net *net = sock_net(skb->sk);
5452 	struct ifinfomsg *ifm;
5453 	struct net_device *dev;
5454 	struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
5455 	int rem, err = -EOPNOTSUPP;
5456 	u16 flags = 0;
5457 
5458 	if (nlmsg_len(nlh) < sizeof(*ifm))
5459 		return -EINVAL;
5460 
5461 	ifm = nlmsg_data(nlh);
5462 	if (ifm->ifi_family != AF_BRIDGE)
5463 		return -EPFNOSUPPORT;
5464 
5465 	dev = __dev_get_by_index(net, ifm->ifi_index);
5466 	if (!dev) {
5467 		NL_SET_ERR_MSG(extack, "unknown ifindex");
5468 		return -ENODEV;
5469 	}
5470 
5471 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5472 	if (br_spec) {
5473 		nla_for_each_nested(attr, br_spec, rem) {
5474 			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
5475 				if (nla_len(attr) < sizeof(flags))
5476 					return -EINVAL;
5477 
5478 				br_flags_attr = attr;
5479 				flags = nla_get_u16(attr);
5480 			}
5481 
5482 			if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5483 				if (nla_len(attr) < sizeof(u16))
5484 					return -EINVAL;
5485 			}
5486 		}
5487 	}
5488 
5489 	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5490 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5491 
5492 		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5493 			err = -EOPNOTSUPP;
5494 			goto out;
5495 		}
5496 
5497 		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5498 							     extack);
5499 		if (err)
5500 			goto out;
5501 
5502 		flags &= ~BRIDGE_FLAGS_MASTER;
5503 	}
5504 
5505 	if ((flags & BRIDGE_FLAGS_SELF)) {
5506 		if (!dev->netdev_ops->ndo_bridge_setlink)
5507 			err = -EOPNOTSUPP;
5508 		else
5509 			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5510 								  flags,
5511 								  extack);
5512 		if (!err) {
5513 			flags &= ~BRIDGE_FLAGS_SELF;
5514 
5515 			/* Generate event to notify upper layer of bridge
5516 			 * change
5517 			 */
5518 			err = rtnl_bridge_notify(dev);
5519 		}
5520 	}
5521 
5522 	if (br_flags_attr)
5523 		memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
5524 out:
5525 	return err;
5526 }
5527 
5528 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5529 			       struct netlink_ext_ack *extack)
5530 {
5531 	struct net *net = sock_net(skb->sk);
5532 	struct ifinfomsg *ifm;
5533 	struct net_device *dev;
5534 	struct nlattr *br_spec, *attr = NULL;
5535 	int rem, err = -EOPNOTSUPP;
5536 	u16 flags = 0;
5537 	bool have_flags = false;
5538 
5539 	if (nlmsg_len(nlh) < sizeof(*ifm))
5540 		return -EINVAL;
5541 
5542 	ifm = nlmsg_data(nlh);
5543 	if (ifm->ifi_family != AF_BRIDGE)
5544 		return -EPFNOSUPPORT;
5545 
5546 	dev = __dev_get_by_index(net, ifm->ifi_index);
5547 	if (!dev) {
5548 		NL_SET_ERR_MSG(extack, "unknown ifindex");
5549 		return -ENODEV;
5550 	}
5551 
5552 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5553 	if (br_spec) {
5554 		nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
5555 					 rem) {
5556 			if (nla_len(attr) < sizeof(flags))
5557 				return -EINVAL;
5558 
5559 			have_flags = true;
5560 			flags = nla_get_u16(attr);
5561 			break;
5562 		}
5563 	}
5564 
5565 	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5566 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5567 
5568 		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5569 			err = -EOPNOTSUPP;
5570 			goto out;
5571 		}
5572 
5573 		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5574 		if (err)
5575 			goto out;
5576 
5577 		flags &= ~BRIDGE_FLAGS_MASTER;
5578 	}
5579 
5580 	if ((flags & BRIDGE_FLAGS_SELF)) {
5581 		if (!dev->netdev_ops->ndo_bridge_dellink)
5582 			err = -EOPNOTSUPP;
5583 		else
5584 			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5585 								  flags);
5586 
5587 		if (!err) {
5588 			flags &= ~BRIDGE_FLAGS_SELF;
5589 
5590 			/* Generate event to notify upper layer of bridge
5591 			 * change
5592 			 */
5593 			err = rtnl_bridge_notify(dev);
5594 		}
5595 	}
5596 
5597 	if (have_flags)
5598 		memcpy(nla_data(attr), &flags, sizeof(flags));
5599 out:
5600 	return err;
5601 }
5602 
5603 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5604 {
5605 	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5606 	       (!idxattr || idxattr == attrid);
5607 }
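
/* An attribute is filled when its IFLA_STATS_FILTER_BIT() is set in the
 * request mask and we are either starting fresh (idxattr == 0) or
 * resuming a dump exactly at that attribute (idxattr == attrid).
 */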
5608 
5609 static bool
5610 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5611 {
5612 	return dev->netdev_ops &&
5613 	       dev->netdev_ops->ndo_has_offload_stats &&
5614 	       dev->netdev_ops->ndo_get_offload_stats &&
5615 	       dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5616 }
5617 
5618 static unsigned int
5619 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5620 {
5621 	return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5622 	       sizeof(struct rtnl_link_stats64) : 0;
5623 }
5624 
5625 static int
5626 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5627 			     struct sk_buff *skb)
5628 {
5629 	unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5630 	struct nlattr *attr = NULL;
5631 	void *attr_data;
5632 	int err;
5633 
5634 	if (!size)
5635 		return -ENODATA;
5636 
5637 	attr = nla_reserve_64bit(skb, attr_id, size,
5638 				 IFLA_OFFLOAD_XSTATS_UNSPEC);
5639 	if (!attr)
5640 		return -EMSGSIZE;
5641 
5642 	attr_data = nla_data(attr);
5643 	memset(attr_data, 0, size);
5644 
5645 	err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5646 	if (err)
5647 		return err;
5648 
5649 	return 0;
5650 }
5651 
5652 static unsigned int
5653 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5654 				   enum netdev_offload_xstats_type type)
5655 {
5656 	bool enabled = netdev_offload_xstats_enabled(dev, type);
5657 
5658 	return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5659 }
5660 
5661 struct rtnl_offload_xstats_request_used {
5662 	bool request;
5663 	bool used;
5664 };
5665 
5666 static int
5667 rtnl_offload_xstats_get_stats(struct net_device *dev,
5668 			      enum netdev_offload_xstats_type type,
5669 			      struct rtnl_offload_xstats_request_used *ru,
5670 			      struct rtnl_hw_stats64 *stats,
5671 			      struct netlink_ext_ack *extack)
5672 {
5673 	bool request;
5674 	bool used;
5675 	int err;
5676 
5677 	request = netdev_offload_xstats_enabled(dev, type);
5678 	if (!request) {
5679 		used = false;
5680 		goto out;
5681 	}
5682 
5683 	err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5684 	if (err)
5685 		return err;
5686 
5687 out:
5688 	if (ru) {
5689 		ru->request = request;
5690 		ru->used = used;
5691 	}
5692 	return 0;
5693 }
5694 
5695 static int
5696 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5697 				       struct rtnl_offload_xstats_request_used *ru)
5698 {
5699 	struct nlattr *nest;
5700 
5701 	nest = nla_nest_start(skb, attr_id);
5702 	if (!nest)
5703 		return -EMSGSIZE;
5704 
5705 	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5706 		goto nla_put_failure;
5707 
5708 	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5709 		goto nla_put_failure;
5710 
5711 	nla_nest_end(skb, nest);
5712 	return 0;
5713 
5714 nla_put_failure:
5715 	nla_nest_cancel(skb, nest);
5716 	return -EMSGSIZE;
5717 }
5718 
5719 static int
5720 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5721 				   struct netlink_ext_ack *extack)
5722 {
5723 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5724 	struct rtnl_offload_xstats_request_used ru_l3;
5725 	struct nlattr *nest;
5726 	int err;
5727 
5728 	err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5729 	if (err)
5730 		return err;
5731 
5732 	nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5733 	if (!nest)
5734 		return -EMSGSIZE;
5735 
5736 	if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5737 						   IFLA_OFFLOAD_XSTATS_L3_STATS,
5738 						   &ru_l3))
5739 		goto nla_put_failure;
5740 
5741 	nla_nest_end(skb, nest);
5742 	return 0;
5743 
5744 nla_put_failure:
5745 	nla_nest_cancel(skb, nest);
5746 	return -EMSGSIZE;
5747 }
5748 
5749 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5750 				    int *prividx, u32 off_filter_mask,
5751 				    struct netlink_ext_ack *extack)
5752 {
5753 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5754 	int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5755 	int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5756 	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5757 	bool have_data = false;
5758 	int err;
5759 
5760 	if (*prividx <= attr_id_cpu_hit &&
5761 	    (off_filter_mask &
5762 	     IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5763 		err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5764 		if (!err) {
5765 			have_data = true;
5766 		} else if (err != -ENODATA) {
5767 			*prividx = attr_id_cpu_hit;
5768 			return err;
5769 		}
5770 	}
5771 
5772 	if (*prividx <= attr_id_hw_s_info &&
5773 	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5774 		*prividx = attr_id_hw_s_info;
5775 
5776 		err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5777 		if (err)
5778 			return err;
5779 
5780 		have_data = true;
5781 		*prividx = 0;
5782 	}
5783 
5784 	if (*prividx <= attr_id_l3_stats &&
5785 	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5786 		unsigned int size_l3;
5787 		struct nlattr *attr;
5788 
5789 		*prividx = attr_id_l3_stats;
5790 
5791 		size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5792 		if (!size_l3)
5793 			goto skip_l3_stats;
5794 		attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5795 					 IFLA_OFFLOAD_XSTATS_UNSPEC);
5796 		if (!attr)
5797 			return -EMSGSIZE;
5798 
5799 		err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5800 						    nla_data(attr), extack);
5801 		if (err)
5802 			return err;
5803 
5804 		have_data = true;
5805 skip_l3_stats:
5806 		*prividx = 0;
5807 	}
5808 
5809 	if (!have_data)
5810 		return -ENODATA;
5811 
5812 	*prividx = 0;
5813 	return 0;
5814 }
5815 
5816 static unsigned int
5817 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5818 					   enum netdev_offload_xstats_type type)
5819 {
5820 	return nla_total_size(0) +
5821 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5822 		nla_total_size(sizeof(u8)) +
5823 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5824 		nla_total_size(sizeof(u8)) +
5825 		0;
5826 }
5827 
5828 static unsigned int
5829 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5830 {
5831 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5832 
5833 	return nla_total_size(0) +
5834 		/* IFLA_OFFLOAD_XSTATS_L3_STATS */
5835 		rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5836 		0;
5837 }
5838 
5839 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5840 					u32 off_filter_mask)
5841 {
5842 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5843 	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5844 	int nla_size = 0;
5845 	int size;
5846 
5847 	if (off_filter_mask &
5848 	    IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5849 		size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5850 		nla_size += nla_total_size_64bit(size);
5851 	}
5852 
5853 	if (off_filter_mask &
5854 	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5855 		nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5856 
5857 	if (off_filter_mask &
5858 	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5859 		size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5860 		nla_size += nla_total_size_64bit(size);
5861 	}
5862 
5863 	if (nla_size != 0)
5864 		nla_size += nla_total_size(0);
5865 
5866 	return nla_size;
5867 }
5868 
5869 struct rtnl_stats_dump_filters {
5870 	/* mask[0] filters the outer attributes. Each nested attribute then
5871 	 * has its own filtering mask at the index of that attribute.
5872 	 */
5873 	u32 mask[IFLA_STATS_MAX + 1];
5874 };
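
/* Illustrative sketch (not a definition used here): a filter set requesting
 * only the HW stats info nest inside the offload-xstats attribute would be
 * composed the way rtnl_offload_xstats_notify() below does it:
 *
 *	struct rtnl_stats_dump_filters filters = {};
 *
 *	filters.mask[0] =
 *		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
 *	filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
 */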
5875 
5876 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5877 			       int type, u32 pid, u32 seq, u32 change,
5878 			       unsigned int flags,
5879 			       const struct rtnl_stats_dump_filters *filters,
5880 			       int *idxattr, int *prividx,
5881 			       struct netlink_ext_ack *extack)
5882 {
5883 	unsigned int filter_mask = filters->mask[0];
5884 	struct if_stats_msg *ifsm;
5885 	struct nlmsghdr *nlh;
5886 	struct nlattr *attr;
5887 	int s_prividx = *prividx;
5888 	int err;
5889 
5890 	ASSERT_RTNL();
5891 
5892 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5893 	if (!nlh)
5894 		return -EMSGSIZE;
5895 
5896 	ifsm = nlmsg_data(nlh);
5897 	ifsm->family = PF_UNSPEC;
5898 	ifsm->pad1 = 0;
5899 	ifsm->pad2 = 0;
5900 	ifsm->ifindex = dev->ifindex;
5901 	ifsm->filter_mask = filter_mask;
5902 
5903 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5904 		struct rtnl_link_stats64 *sp;
5905 
5906 		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5907 					 sizeof(struct rtnl_link_stats64),
5908 					 IFLA_STATS_UNSPEC);
5909 		if (!attr) {
5910 			err = -EMSGSIZE;
5911 			goto nla_put_failure;
5912 		}
5913 
5914 		sp = nla_data(attr);
5915 		dev_get_stats(dev, sp);
5916 	}
5917 
5918 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5919 		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5920 
5921 		if (ops && ops->fill_linkxstats) {
5922 			*idxattr = IFLA_STATS_LINK_XSTATS;
5923 			attr = nla_nest_start_noflag(skb,
5924 						     IFLA_STATS_LINK_XSTATS);
5925 			if (!attr) {
5926 				err = -EMSGSIZE;
5927 				goto nla_put_failure;
5928 			}
5929 
5930 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5931 			nla_nest_end(skb, attr);
5932 			if (err)
5933 				goto nla_put_failure;
5934 			*idxattr = 0;
5935 		}
5936 	}
5937 
5938 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5939 			     *idxattr)) {
5940 		const struct rtnl_link_ops *ops = NULL;
5941 		const struct net_device *master;
5942 
5943 		master = netdev_master_upper_dev_get(dev);
5944 		if (master)
5945 			ops = master->rtnl_link_ops;
5946 		if (ops && ops->fill_linkxstats) {
5947 			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5948 			attr = nla_nest_start_noflag(skb,
5949 						     IFLA_STATS_LINK_XSTATS_SLAVE);
5950 			if (!attr) {
5951 				err = -EMSGSIZE;
5952 				goto nla_put_failure;
5953 			}
5954 
5955 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5956 			nla_nest_end(skb, attr);
5957 			if (err)
5958 				goto nla_put_failure;
5959 			*idxattr = 0;
5960 		}
5961 	}
5962 
5963 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5964 			     *idxattr)) {
5965 		u32 off_filter_mask;
5966 
5967 		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5968 		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5969 		attr = nla_nest_start_noflag(skb,
5970 					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
5971 		if (!attr) {
5972 			err = -EMSGSIZE;
5973 			goto nla_put_failure;
5974 		}
5975 
5976 		err = rtnl_offload_xstats_fill(skb, dev, prividx,
5977 					       off_filter_mask, extack);
5978 		if (err == -ENODATA)
5979 			nla_nest_cancel(skb, attr);
5980 		else
5981 			nla_nest_end(skb, attr);
5982 
5983 		if (err && err != -ENODATA)
5984 			goto nla_put_failure;
5985 		*idxattr = 0;
5986 	}
5987 
5988 	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5989 		struct rtnl_af_ops *af_ops;
5990 
5991 		*idxattr = IFLA_STATS_AF_SPEC;
5992 		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5993 		if (!attr) {
5994 			err = -EMSGSIZE;
5995 			goto nla_put_failure;
5996 		}
5997 
5998 		rcu_read_lock();
5999 		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
6000 			if (af_ops->fill_stats_af) {
6001 				struct nlattr *af;
6002 
6003 				af = nla_nest_start_noflag(skb,
6004 							   af_ops->family);
6005 				if (!af) {
6006 					rcu_read_unlock();
6007 					err = -EMSGSIZE;
6008 					goto nla_put_failure;
6009 				}
6010 				err = af_ops->fill_stats_af(skb, dev);
6011 
6012 				if (err == -ENODATA) {
6013 					nla_nest_cancel(skb, af);
6014 				} else if (err < 0) {
6015 					rcu_read_unlock();
6016 					goto nla_put_failure;
6017 				}
6018 
6019 				nla_nest_end(skb, af);
6020 			}
6021 		}
6022 		rcu_read_unlock();
6023 
6024 		nla_nest_end(skb, attr);
6025 
6026 		*idxattr = 0;
6027 	}
6028 
6029 	nlmsg_end(skb, nlh);
6030 
6031 	return 0;
6032 
6033 nla_put_failure:
6034 	/* Not a multi-part message, or no progress made, means a real error. */
6035 	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
6036 		nlmsg_cancel(skb, nlh);
6037 	else
6038 		nlmsg_end(skb, nlh);
6039 
6040 	return err;
6041 }
6042 
6043 static size_t if_nlmsg_stats_size(const struct net_device *dev,
6044 				  const struct rtnl_stats_dump_filters *filters)
6045 {
6046 	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
6047 	unsigned int filter_mask = filters->mask[0];
6048 
6049 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
6050 		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
6051 
6052 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
6053 		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
6054 		int attr = IFLA_STATS_LINK_XSTATS;
6055 
6056 		if (ops && ops->get_linkxstats_size) {
6057 			size += nla_total_size(ops->get_linkxstats_size(dev,
6058 									attr));
6059 			/* for IFLA_STATS_LINK_XSTATS */
6060 			size += nla_total_size(0);
6061 		}
6062 	}
6063 
6064 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
6065 		struct net_device *_dev = (struct net_device *)dev;
6066 		const struct rtnl_link_ops *ops = NULL;
6067 		const struct net_device *master;
6068 
6069 		/* netdev_master_upper_dev_get can't take const */
6070 		master = netdev_master_upper_dev_get(_dev);
6071 		if (master)
6072 			ops = master->rtnl_link_ops;
6073 		if (ops && ops->get_linkxstats_size) {
6074 			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
6075 
6076 			size += nla_total_size(ops->get_linkxstats_size(dev,
6077 									attr));
6078 			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
6079 			size += nla_total_size(0);
6080 		}
6081 	}
6082 
6083 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
6084 		u32 off_filter_mask;
6085 
6086 		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
6087 		size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
6088 	}
6089 
6090 	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
6091 		struct rtnl_af_ops *af_ops;
6092 
6093 		/* for IFLA_STATS_AF_SPEC */
6094 		size += nla_total_size(0);
6095 
6096 		rcu_read_lock();
6097 		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
6098 			if (af_ops->get_stats_af_size) {
6099 				size += nla_total_size(
6100 					af_ops->get_stats_af_size(dev));
6101 
6102 				/* for AF_* */
6103 				size += nla_total_size(0);
6104 			}
6105 		}
6106 		rcu_read_unlock();
6107 	}
6108 
6109 	return size;
6110 }
6111 
6112 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
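
/* The mask above sets one bit per known IFLA_OFFLOAD_XSTATS_* attribute,
 * so NLA_POLICY_MASK below rejects filter requests with unknown bits set.
 */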
6113 
6114 static const struct nla_policy
6115 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
6116 	[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
6117 		    NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
6118 };
6119 
6120 static const struct nla_policy
6121 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
6122 	[IFLA_STATS_GET_FILTERS] =
6123 		    NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
6124 };
6125 
6126 static const struct nla_policy
6127 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
6128 	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
6129 };
6130 
6131 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
6132 					struct rtnl_stats_dump_filters *filters,
6133 					struct netlink_ext_ack *extack)
6134 {
6135 	struct nlattr *tb[IFLA_STATS_MAX + 1];
6136 	int err;
6137 	int at;
6138 
6139 	err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
6140 			       rtnl_stats_get_policy_filters, extack);
6141 	if (err < 0)
6142 		return err;
6143 
6144 	for (at = 1; at <= IFLA_STATS_MAX; at++) {
6145 		if (tb[at]) {
6146 			if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
6147 				NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
6148 				return -EINVAL;
6149 			}
6150 			filters->mask[at] = nla_get_u32(tb[at]);
6151 		}
6152 	}
6153 
6154 	return 0;
6155 }
6156 
6157 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
6158 				u32 filter_mask,
6159 				struct rtnl_stats_dump_filters *filters,
6160 				struct netlink_ext_ack *extack)
6161 {
6162 	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6163 	int err;
6164 	int i;
6165 
6166 	filters->mask[0] = filter_mask;
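	/* Nested masks default to all-ones, i.e. "everything enabled", unless
	 * an IFLA_STATS_GET_FILTERS attribute narrows them down below.
	 */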
6167 	for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
6168 		filters->mask[i] = -1U;
6169 
6170 	err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
6171 			  IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
6172 	if (err < 0)
6173 		return err;
6174 
6175 	if (tb[IFLA_STATS_GET_FILTERS]) {
6176 		err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
6177 						   filters, extack);
6178 		if (err)
6179 			return err;
6180 	}
6181 
6182 	return 0;
6183 }
6184 
6185 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
6186 				bool is_dump, struct netlink_ext_ack *extack)
6187 {
6188 	struct if_stats_msg *ifsm;
6189 
6190 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
6191 		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
6192 		return -EINVAL;
6193 	}
6194 
6195 	if (!strict_check)
6196 		return 0;
6197 
6198 	ifsm = nlmsg_data(nlh);
6199 
6200 	/* only requests using strict checks can pass data to influence
6201 	 * the dump. The legacy exception is filter_mask.
6202 	 */
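	/* As an illustrative example, a well-formed strict GET header would be
	 *
	 *	struct if_stats_msg ifsm = {
	 *		.ifindex	= <target device>,
	 *		.filter_mask	= IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
	 *	};
	 *
	 * with pad1/pad2 left at zero, and ifindex zero for dump requests.
	 */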
6203 	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
6204 		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
6205 		return -EINVAL;
6206 	}
6207 	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
6208 		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
6209 		return -EINVAL;
6210 	}
6211 
6212 	return 0;
6213 }
6214 
6215 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
6216 			  struct netlink_ext_ack *extack)
6217 {
6218 	struct rtnl_stats_dump_filters filters;
6219 	struct net *net = sock_net(skb->sk);
6220 	struct net_device *dev = NULL;
6221 	int idxattr = 0, prividx = 0;
6222 	struct if_stats_msg *ifsm;
6223 	struct sk_buff *nskb;
6224 	int err;
6225 
6226 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6227 				   false, extack);
6228 	if (err)
6229 		return err;
6230 
6231 	ifsm = nlmsg_data(nlh);
6232 	if (ifsm->ifindex > 0)
6233 		dev = __dev_get_by_index(net, ifsm->ifindex);
6234 	else
6235 		return -EINVAL;
6236 
6237 	if (!dev)
6238 		return -ENODEV;
6239 
6240 	if (!ifsm->filter_mask) {
6241 		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
6242 		return -EINVAL;
6243 	}
6244 
6245 	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
6246 	if (err)
6247 		return err;
6248 
6249 	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
6250 	if (!nskb)
6251 		return -ENOBUFS;
6252 
6253 	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
6254 				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
6255 				  0, &filters, &idxattr, &prividx, extack);
6256 	if (err < 0) {
6257 		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
6258 		WARN_ON(err == -EMSGSIZE);
6259 		kfree_skb(nskb);
6260 	} else {
6261 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
6262 	}
6263 
6264 	return err;
6265 }
6266 
6267 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
6268 {
6269 	struct netlink_ext_ack *extack = cb->extack;
6270 	struct rtnl_stats_dump_filters filters;
6271 	struct net *net = sock_net(skb->sk);
6272 	unsigned int flags = NLM_F_MULTI;
6273 	struct if_stats_msg *ifsm;
6274 	struct {
6275 		unsigned long ifindex;
6276 		int idxattr;
6277 		int prividx;
6278 	} *ctx = (void *)cb->ctx;
6279 	struct net_device *dev;
6280 	int err;
6281 
6282 	cb->seq = net->dev_base_seq;
6283 
6284 	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
6285 	if (err)
6286 		return err;
6287 
6288 	ifsm = nlmsg_data(cb->nlh);
6289 	if (!ifsm->filter_mask) {
6290 		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
6291 		return -EINVAL;
6292 	}
6293 
6294 	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
6295 				   extack);
6296 	if (err)
6297 		return err;
6298 
6299 	for_each_netdev_dump(net, dev, ctx->ifindex) {
6300 		err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6301 					  NETLINK_CB(cb->skb).portid,
6302 					  cb->nlh->nlmsg_seq, 0,
6303 					  flags, &filters,
6304 					  &ctx->idxattr, &ctx->prividx,
6305 					  extack);
6306 		/* If we ran out of room on the first message,
6307 		 * we're in trouble.
6308 		 */
6309 		WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
6310 
6311 		if (err < 0)
6312 			break;
6313 		ctx->prividx = 0;
6314 		ctx->idxattr = 0;
6315 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
6316 	}
6317 
6318 	return err;
6319 }
6320 
6321 void rtnl_offload_xstats_notify(struct net_device *dev)
6322 {
6323 	struct rtnl_stats_dump_filters response_filters = {};
6324 	struct net *net = dev_net(dev);
6325 	int idxattr = 0, prividx = 0;
6326 	struct sk_buff *skb;
6327 	int err = -ENOBUFS;
6328 
6329 	ASSERT_RTNL();
6330 
6331 	response_filters.mask[0] |=
6332 		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6333 	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6334 		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6335 
6336 	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6337 			GFP_KERNEL);
6338 	if (!skb)
6339 		goto errout;
6340 
6341 	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6342 				  &response_filters, &idxattr, &prividx, NULL);
6343 	if (err < 0) {
6344 		kfree_skb(skb);
6345 		goto errout;
6346 	}
6347 
6348 	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6349 	return;
6350 
6351 errout:
6352 	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6353 }
6354 EXPORT_SYMBOL(rtnl_offload_xstats_notify);
6355 
6356 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6357 			  struct netlink_ext_ack *extack)
6358 {
6359 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
6360 	struct rtnl_stats_dump_filters response_filters = {};
6361 	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6362 	struct net *net = sock_net(skb->sk);
6363 	struct net_device *dev = NULL;
6364 	struct if_stats_msg *ifsm;
6365 	bool notify = false;
6366 	int err;
6367 
6368 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6369 				   false, extack);
6370 	if (err)
6371 		return err;
6372 
6373 	ifsm = nlmsg_data(nlh);
6374 	if (ifsm->family != AF_UNSPEC) {
6375 		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6376 		return -EINVAL;
6377 	}
6378 
6379 	if (ifsm->ifindex > 0)
6380 		dev = __dev_get_by_index(net, ifsm->ifindex);
6381 	else
6382 		return -EINVAL;
6383 
6384 	if (!dev)
6385 		return -ENODEV;
6386 
6387 	if (ifsm->filter_mask) {
6388 		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6389 		return -EINVAL;
6390 	}
6391 
6392 	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6393 			  ifla_stats_set_policy, extack);
6394 	if (err < 0)
6395 		return err;
6396 
6397 	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6398 		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6399 
6400 		if (req)
6401 			err = netdev_offload_xstats_enable(dev, t_l3, extack);
6402 		else
6403 			err = netdev_offload_xstats_disable(dev, t_l3);
6404 
6405 		if (!err)
6406 			notify = true;
6407 		else if (err != -EALREADY)
6408 			return err;
6409 
6410 		response_filters.mask[0] |=
6411 			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6412 		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6413 			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6414 	}
6415 
6416 	if (notify)
6417 		rtnl_offload_xstats_notify(dev);
6418 
6419 	return 0;
6420 }
6421 
6422 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6423 				   struct netlink_ext_ack *extack)
6424 {
6425 	struct br_port_msg *bpm;
6426 
6427 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6428 		NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6429 		return -EINVAL;
6430 	}
6431 
6432 	bpm = nlmsg_data(nlh);
6433 	if (bpm->ifindex) {
6434 		NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6435 		return -EINVAL;
6436 	}
6437 	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6438 		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6439 		return -EINVAL;
6440 	}
6441 
6442 	return 0;
6443 }
6444 
6445 struct rtnl_mdb_dump_ctx {
6446 	long idx;
6447 };
6448 
6449 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
6450 {
6451 	struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
6452 	struct net *net = sock_net(skb->sk);
6453 	struct net_device *dev;
6454 	int idx, s_idx;
6455 	int err;
6456 
6457 	NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx);
6458 
6459 	if (cb->strict_check) {
6460 		err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
6461 		if (err)
6462 			return err;
6463 	}
6464 
6465 	s_idx = ctx->idx;
6466 	idx = 0;
6467 
6468 	for_each_netdev(net, dev) {
6469 		if (idx < s_idx)
6470 			goto skip;
6471 		if (!dev->netdev_ops->ndo_mdb_dump)
6472 			goto skip;
6473 
6474 		err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6475 		if (err == -EMSGSIZE)
6476 			goto out;
6477 		/* Moving on to the next device, reset markers and sequence
6478 		 * counters since they are all maintained per-device.
6479 		 */
6480 		memset(cb->ctx, 0, sizeof(cb->ctx));
6481 		cb->prev_seq = 0;
6482 		cb->seq = 0;
6483 skip:
6484 		idx++;
6485 	}
6486 
6487 out:
6488 	ctx->idx = idx;
6489 	return skb->len;
6490 }
6491 
6492 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
6493 				       struct netlink_ext_ack *extack)
6494 {
6495 	struct br_mdb_entry *entry = nla_data(attr);
6496 
6497 	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6498 		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6499 		return -EINVAL;
6500 	}
6501 
6502 	if (entry->ifindex) {
6503 		NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
6504 		return -EINVAL;
6505 	}
6506 
6507 	if (entry->state) {
6508 		NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
6509 		return -EINVAL;
6510 	}
6511 
6512 	if (entry->flags) {
6513 		NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
6514 		return -EINVAL;
6515 	}
6516 
6517 	if (entry->vid >= VLAN_VID_MASK) {
6518 		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6519 		return -EINVAL;
6520 	}
6521 
6522 	if (entry->addr.proto != htons(ETH_P_IP) &&
6523 	    entry->addr.proto != htons(ETH_P_IPV6) &&
6524 	    entry->addr.proto != 0) {
6525 		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6526 		return -EINVAL;
6527 	}
6528 
6529 	return 0;
6530 }
6531 
6532 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
6533 	[MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6534 						  rtnl_validate_mdb_entry_get,
6535 						  sizeof(struct br_mdb_entry)),
6536 	[MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6537 };
6538 
6539 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6540 			struct netlink_ext_ack *extack)
6541 {
6542 	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
6543 	struct net *net = sock_net(in_skb->sk);
6544 	struct br_port_msg *bpm;
6545 	struct net_device *dev;
6546 	int err;
6547 
6548 	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
6549 			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
6550 	if (err)
6551 		return err;
6552 
6553 	bpm = nlmsg_data(nlh);
6554 	if (!bpm->ifindex) {
6555 		NL_SET_ERR_MSG(extack, "Invalid ifindex");
6556 		return -EINVAL;
6557 	}
6558 
6559 	dev = __dev_get_by_index(net, bpm->ifindex);
6560 	if (!dev) {
6561 		NL_SET_ERR_MSG(extack, "Device doesn't exist");
6562 		return -ENODEV;
6563 	}
6564 
6565 	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
6566 		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
6567 		return -EINVAL;
6568 	}
6569 
6570 	if (!dev->netdev_ops->ndo_mdb_get) {
6571 		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6572 		return -EOPNOTSUPP;
6573 	}
6574 
6575 	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
6576 					    nlh->nlmsg_seq, extack);
6577 }
6578 
6579 static int rtnl_validate_mdb_entry(const struct nlattr *attr,
6580 				   struct netlink_ext_ack *extack)
6581 {
6582 	struct br_mdb_entry *entry = nla_data(attr);
6583 
6584 	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6585 		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6586 		return -EINVAL;
6587 	}
6588 
6589 	if (entry->ifindex == 0) {
6590 		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
6591 		return -EINVAL;
6592 	}
6593 
6594 	if (entry->addr.proto == htons(ETH_P_IP)) {
6595 		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
6596 		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
6597 			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
6598 			return -EINVAL;
6599 		}
6600 		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
6601 			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
6602 			return -EINVAL;
6603 		}
6604 #if IS_ENABLED(CONFIG_IPV6)
6605 	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
6606 		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
6607 			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
6608 			return -EINVAL;
6609 		}
6610 #endif
6611 	} else if (entry->addr.proto == 0) {
6612 		/* L2 mdb */
6613 		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
6614 			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
6615 			return -EINVAL;
6616 		}
6617 	} else {
6618 		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6619 		return -EINVAL;
6620 	}
6621 
6622 	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6623 		NL_SET_ERR_MSG(extack, "Unknown entry state");
6624 		return -EINVAL;
6625 	}
6626 	if (entry->vid >= VLAN_VID_MASK) {
6627 		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6628 		return -EINVAL;
6629 	}
6630 
6631 	return 0;
6632 }
6633 
6634 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
6635 	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
6636 	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6637 						  rtnl_validate_mdb_entry,
6638 						  sizeof(struct br_mdb_entry)),
6639 	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6640 };
6641 
6642 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6643 			struct netlink_ext_ack *extack)
6644 {
6645 	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6646 	struct net *net = sock_net(skb->sk);
6647 	struct br_port_msg *bpm;
6648 	struct net_device *dev;
6649 	int err;
6650 
6651 	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6652 				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6653 	if (err)
6654 		return err;
6655 
6656 	bpm = nlmsg_data(nlh);
6657 	if (!bpm->ifindex) {
6658 		NL_SET_ERR_MSG(extack, "Invalid ifindex");
6659 		return -EINVAL;
6660 	}
6661 
6662 	dev = __dev_get_by_index(net, bpm->ifindex);
6663 	if (!dev) {
6664 		NL_SET_ERR_MSG(extack, "Device doesn't exist");
6665 		return -ENODEV;
6666 	}
6667 
6668 	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6669 		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6670 		return -EINVAL;
6671 	}
6672 
6673 	if (!dev->netdev_ops->ndo_mdb_add) {
6674 		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6675 		return -EOPNOTSUPP;
6676 	}
6677 
6678 	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6679 }
6680 
6681 static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
6682 					    struct netlink_ext_ack *extack)
6683 {
6684 	struct br_mdb_entry *entry = nla_data(attr);
6685 	struct br_mdb_entry zero_entry = {};
6686 
6687 	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6688 		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6689 		return -EINVAL;
6690 	}
6691 
6692 	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6693 		NL_SET_ERR_MSG(extack, "Unknown entry state");
6694 		return -EINVAL;
6695 	}
6696 
6697 	if (entry->flags) {
6698 		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
6699 		return -EINVAL;
6700 	}
6701 
6702 	if (entry->vid >= VLAN_N_VID - 1) {
6703 		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6704 		return -EINVAL;
6705 	}
6706 
6707 	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
6708 		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
6709 		return -EINVAL;
6710 	}
6711 
6712 	return 0;
6713 }
6714 
6715 static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
6716 	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6717 						  rtnl_validate_mdb_entry_del_bulk,
6718 						  sizeof(struct br_mdb_entry)),
6719 	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6720 };
6721 
6722 static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
6723 			struct netlink_ext_ack *extack)
6724 {
6725 	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
6726 	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6727 	struct net *net = sock_net(skb->sk);
6728 	struct br_port_msg *bpm;
6729 	struct net_device *dev;
6730 	int err;
6731 
6732 	if (!del_bulk)
6733 		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6734 					     MDBA_SET_ENTRY_MAX, mdba_policy,
6735 					     extack);
6736 	else
6737 		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
6738 				  mdba_del_bulk_policy, extack);
6739 	if (err)
6740 		return err;
6741 
6742 	bpm = nlmsg_data(nlh);
6743 	if (!bpm->ifindex) {
6744 		NL_SET_ERR_MSG(extack, "Invalid ifindex");
6745 		return -EINVAL;
6746 	}
6747 
6748 	dev = __dev_get_by_index(net, bpm->ifindex);
6749 	if (!dev) {
6750 		NL_SET_ERR_MSG(extack, "Device doesn't exist");
6751 		return -ENODEV;
6752 	}
6753 
6754 	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6755 		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6756 		return -EINVAL;
6757 	}
6758 
6759 	if (del_bulk) {
6760 		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
6761 			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
6762 			return -EOPNOTSUPP;
6763 		}
6764 		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
6765 	}
6766 
6767 	if (!dev->netdev_ops->ndo_mdb_del) {
6768 		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6769 		return -EOPNOTSUPP;
6770 	}
6771 
6772 	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6773 }
6774 
6775 /* Process one rtnetlink message. */
6776 
6777 static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
6778 {
6779 	const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
6780 	rtnl_dumpit_func dumpit = cb->data;
6781 	int err;
6782 
6783 	/* The previous iteration has already finished; avoid calling ->dumpit()
6784 	 * again, as it may not expect to be called after reaching the end.
6785 	 */
6786 	if (!dumpit)
6787 		return 0;
6788 
6789 	if (needs_lock)
6790 		rtnl_lock();
6791 	err = dumpit(skb, cb);
6792 	if (needs_lock)
6793 		rtnl_unlock();
6794 
6795 	/* Old dump handlers used to send NLM_DONE in a separate recvmsg()
6796 	 * response. Some applications that parse netlink manually depend on this.
6797 	 */
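	/* Sketch of the resulting flows (descriptive only):
	 *
	 *	split done:	recvmsg() -> dump records
	 *			recvmsg() -> NLMSG_DONE
	 *	inline done:	recvmsg() -> dump records + NLMSG_DONE
	 *
	 * Clearing cb->data and returning skb->len forces one more (empty)
	 * pass, after which the netlink core emits NLMSG_DONE on its own.
	 */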
6798 	if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
6799 		if (err < 0 && err != -EMSGSIZE)
6800 			return err;
6801 		if (!err)
6802 			cb->data = NULL;
6803 
6804 		return skb->len;
6805 	}
6806 	return err;
6807 }
6808 
6809 static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
6810 				const struct nlmsghdr *nlh,
6811 				struct netlink_dump_control *control)
6812 {
6813 	if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
6814 	    !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
6815 		WARN_ON(control->data);
6816 		control->data = control->dump;
6817 		control->dump = rtnl_dumpit;
6818 	}
6819 
6820 	return netlink_dump_start(ssk, skb, nlh, control);
6821 }
6822 
6823 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6824 			     struct netlink_ext_ack *extack)
6825 {
6826 	struct net *net = sock_net(skb->sk);
6827 	struct rtnl_link *link;
6828 	enum rtnl_kinds kind;
6829 	struct module *owner;
6830 	int err = -EOPNOTSUPP;
6831 	rtnl_doit_func doit;
6832 	unsigned int flags;
6833 	int family;
6834 	int type;
6835 
6836 	type = nlh->nlmsg_type;
6837 	if (type > RTM_MAX)
6838 		return -EOPNOTSUPP;
6839 
6840 	type -= RTM_BASE;
6841 
6842 	/* All messages must carry at least 1 byte of payload (the rtgen family) */
6843 	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6844 		return 0;
6845 
6846 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
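	/* RTM_* types are allocated in blocks of four (NEW/DEL/GET/SET), so
	 * the low bits of the offset from RTM_BASE encode the message kind.
	 */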
6847 	kind = rtnl_msgtype_kind(type);
6848 
6849 	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6850 		return -EPERM;
6851 
6852 	rcu_read_lock();
6853 	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6854 		struct sock *rtnl;
6855 		rtnl_dumpit_func dumpit;
6856 		u32 min_dump_alloc = 0;
6857 
6858 		link = rtnl_get_link(family, type);
6859 		if (!link || !link->dumpit) {
6860 			family = PF_UNSPEC;
6861 			link = rtnl_get_link(family, type);
6862 			if (!link || !link->dumpit)
6863 				goto err_unlock;
6864 		}
6865 		owner = link->owner;
6866 		dumpit = link->dumpit;
6867 		flags = link->flags;
6868 
6869 		if (type == RTM_GETLINK - RTM_BASE)
6870 			min_dump_alloc = rtnl_calcit(skb, nlh);
6871 
6872 		err = 0;
6873 		/* need to do this before rcu_read_unlock() */
6874 		if (!try_module_get(owner))
6875 			err = -EPROTONOSUPPORT;
6876 
6877 		rcu_read_unlock();
6878 
6879 		rtnl = net->rtnl;
6880 		if (err == 0) {
6881 			struct netlink_dump_control c = {
6882 				.dump		= dumpit,
6883 				.min_dump_alloc	= min_dump_alloc,
6884 				.module		= owner,
6885 				.flags		= flags,
6886 			};
6887 			err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
6888 			/* netlink_dump_start() will keep a reference on the
6889 			 * module if the dump is still in progress.
6890 			 */
6891 			module_put(owner);
6892 		}
6893 		return err;
6894 	}
6895 
6896 	link = rtnl_get_link(family, type);
6897 	if (!link || !link->doit) {
6898 		family = PF_UNSPEC;
6899 		link = rtnl_get_link(PF_UNSPEC, type);
6900 		if (!link || !link->doit)
6901 			goto out_unlock;
6902 	}
6903 
6904 	owner = link->owner;
6905 	if (!try_module_get(owner)) {
6906 		err = -EPROTONOSUPPORT;
6907 		goto out_unlock;
6908 	}
6909 
6910 	flags = link->flags;
6911 	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6912 	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6913 		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6914 		module_put(owner);
6915 		goto err_unlock;
6916 	}
6917 
6918 	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6919 		doit = link->doit;
6920 		rcu_read_unlock();
6921 		if (doit)
6922 			err = doit(skb, nlh, extack);
6923 		module_put(owner);
6924 		return err;
6925 	}
6926 	rcu_read_unlock();
6927 
6928 	rtnl_lock();
6929 	link = rtnl_get_link(family, type);
6930 	if (link && link->doit)
6931 		err = link->doit(skb, nlh, extack);
6932 	rtnl_unlock();
6933 
6934 	module_put(owner);
6935 
6936 	return err;
6937 
6938 out_unlock:
6939 	rcu_read_unlock();
6940 	return err;
6941 
6942 err_unlock:
6943 	rcu_read_unlock();
6944 	return -EOPNOTSUPP;
6945 }
6946 
6947 static void rtnetlink_rcv(struct sk_buff *skb)
6948 {
6949 	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6950 }
6951 
6952 static int rtnetlink_bind(struct net *net, int group)
6953 {
6954 	switch (group) {
6955 	case RTNLGRP_IPV4_MROUTE_R:
6956 	case RTNLGRP_IPV6_MROUTE_R:
6957 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6958 			return -EPERM;
6959 		break;
6960 	}
6961 	return 0;
6962 }
6963 
6964 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6965 {
6966 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6967 
6968 	switch (event) {
6969 	case NETDEV_REBOOT:
6970 	case NETDEV_CHANGEMTU:
6971 	case NETDEV_CHANGEADDR:
6972 	case NETDEV_CHANGENAME:
6973 	case NETDEV_FEAT_CHANGE:
6974 	case NETDEV_BONDING_FAILOVER:
6975 	case NETDEV_POST_TYPE_CHANGE:
6976 	case NETDEV_NOTIFY_PEERS:
6977 	case NETDEV_CHANGEUPPER:
6978 	case NETDEV_RESEND_IGMP:
6979 	case NETDEV_CHANGEINFODATA:
6980 	case NETDEV_CHANGELOWERSTATE:
6981 	case NETDEV_CHANGE_TX_QUEUE_LEN:
6982 		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6983 				   GFP_KERNEL, NULL, 0, 0, NULL);
6984 		break;
6985 	default:
6986 		break;
6987 	}
6988 	return NOTIFY_DONE;
6989 }
6990 
6991 static struct notifier_block rtnetlink_dev_notifier = {
6992 	.notifier_call	= rtnetlink_event,
6993 };
6994 
6995 
6996 static int __net_init rtnetlink_net_init(struct net *net)
6997 {
6998 	struct sock *sk;
6999 	struct netlink_kernel_cfg cfg = {
7000 		.groups		= RTNLGRP_MAX,
7001 		.input		= rtnetlink_rcv,
7002 		.flags		= NL_CFG_F_NONROOT_RECV,
7003 		.bind		= rtnetlink_bind,
7004 	};
7005 
7006 	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
7007 	if (!sk)
7008 		return -ENOMEM;
7009 	net->rtnl = sk;
7010 	return 0;
7011 }
7012 
7013 static void __net_exit rtnetlink_net_exit(struct net *net)
7014 {
7015 	netlink_kernel_release(net->rtnl);
7016 	net->rtnl = NULL;
7017 }
7018 
7019 static struct pernet_operations rtnetlink_net_ops = {
7020 	.init = rtnetlink_net_init,
7021 	.exit = rtnetlink_net_exit,
7022 };
7023 
7024 static const struct rtnl_msg_handler rtnetlink_rtnl_msg_handlers[] __initconst = {
7025 	{.msgtype = RTM_NEWLINK, .doit = rtnl_newlink,
7026 	 .flags = RTNL_FLAG_DOIT_PERNET},
7027 	{.msgtype = RTM_DELLINK, .doit = rtnl_dellink,
7028 	 .flags = RTNL_FLAG_DOIT_PERNET_WIP},
7029 	{.msgtype = RTM_GETLINK, .doit = rtnl_getlink,
7030 	 .dumpit = rtnl_dump_ifinfo, .flags = RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
7031 	{.msgtype = RTM_SETLINK, .doit = rtnl_setlink,
7032 	 .flags = RTNL_FLAG_DOIT_PERNET_WIP},
7033 	{.msgtype = RTM_GETADDR, .dumpit = rtnl_dump_all},
7034 	{.msgtype = RTM_GETROUTE, .dumpit = rtnl_dump_all},
7035 	{.msgtype = RTM_GETNETCONF, .dumpit = rtnl_dump_all},
7036 	{.msgtype = RTM_GETSTATS, .doit = rtnl_stats_get,
7037 	 .dumpit = rtnl_stats_dump},
7038 	{.msgtype = RTM_SETSTATS, .doit = rtnl_stats_set},
7039 	{.msgtype = RTM_NEWLINKPROP, .doit = rtnl_newlinkprop},
7040 	{.msgtype = RTM_DELLINKPROP, .doit = rtnl_dellinkprop},
7041 	{.protocol = PF_BRIDGE, .msgtype = RTM_GETLINK,
7042 	 .dumpit = rtnl_bridge_getlink},
7043 	{.protocol = PF_BRIDGE, .msgtype = RTM_DELLINK,
7044 	 .doit = rtnl_bridge_dellink},
7045 	{.protocol = PF_BRIDGE, .msgtype = RTM_SETLINK,
7046 	 .doit = rtnl_bridge_setlink},
7047 	{.protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH, .doit = rtnl_fdb_add},
7048 	{.protocol = PF_BRIDGE, .msgtype = RTM_DELNEIGH, .doit = rtnl_fdb_del,
7049 	 .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
7050 	{.protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH, .doit = rtnl_fdb_get,
7051 	 .dumpit = rtnl_fdb_dump},
7052 	{.protocol = PF_BRIDGE, .msgtype = RTM_NEWMDB, .doit = rtnl_mdb_add},
7053 	{.protocol = PF_BRIDGE, .msgtype = RTM_DELMDB, .doit = rtnl_mdb_del,
7054 	 .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
7055 	{.protocol = PF_BRIDGE, .msgtype = RTM_GETMDB, .doit = rtnl_mdb_get,
7056 	 .dumpit = rtnl_mdb_dump},
7057 };
7058 
7059 void __init rtnetlink_init(void)
7060 {
7061 	if (register_pernet_subsys(&rtnetlink_net_ops))
7062 		panic("rtnetlink_init: cannot initialize rtnetlink\n");
7063 
7064 	register_netdevice_notifier(&rtnetlink_dev_notifier);
7065 
7066 	rtnl_register_many(rtnetlink_rtnl_msg_handlers);
7067 }
7068