xref: /linux/net/core/rtnetlink.c (revision 8386f58f8deda81110283798a387fb53ec21957c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Routing netlink socket interface: protocol independent part.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  *	Fixes:
12  *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
13  */
14 
15 #include <linux/bitops.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/kernel.h>
21 #include <linux/timer.h>
22 #include <linux/string.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/fcntl.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/capability.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/security.h>
33 #include <linux/mutex.h>
34 #include <linux/if_addr.h>
35 #include <linux/if_bridge.h>
36 #include <linux/if_vlan.h>
37 #include <linux/pci.h>
38 #include <linux/etherdevice.h>
39 #include <linux/bpf.h>
40 
41 #include <linux/uaccess.h>
42 
43 #include <linux/inet.h>
44 #include <linux/netdevice.h>
45 #include <net/ip.h>
46 #include <net/protocol.h>
47 #include <net/arp.h>
48 #include <net/route.h>
49 #include <net/udp.h>
50 #include <net/tcp.h>
51 #include <net/sock.h>
52 #include <net/pkt_sched.h>
53 #include <net/fib_rules.h>
54 #include <net/rtnetlink.h>
55 #include <net/net_namespace.h>
56 #include <net/devlink.h>
57 #if IS_ENABLED(CONFIG_IPV6)
58 #include <net/addrconf.h>
59 #endif
60 
61 #include "dev.h"
62 
63 #define RTNL_MAX_TYPE		50
64 #define RTNL_SLAVE_MAX_TYPE	43
65 
66 struct rtnl_link {
67 	rtnl_doit_func		doit;
68 	rtnl_dumpit_func	dumpit;
69 	struct module		*owner;
70 	unsigned int		flags;
71 	struct rcu_head		rcu;
72 };
73 
74 static DEFINE_MUTEX(rtnl_mutex);
75 
76 void rtnl_lock(void)
77 {
78 	mutex_lock(&rtnl_mutex);
79 }
80 EXPORT_SYMBOL(rtnl_lock);
81 
82 int rtnl_lock_killable(void)
83 {
84 	return mutex_lock_killable(&rtnl_mutex);
85 }
86 EXPORT_SYMBOL(rtnl_lock_killable);
87 
88 static struct sk_buff *defer_kfree_skb_list;
89 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
90 {
91 	if (head && tail) {
92 		tail->next = defer_kfree_skb_list;
93 		defer_kfree_skb_list = head;
94 	}
95 }
96 EXPORT_SYMBOL(rtnl_kfree_skbs);
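
/* Editor's sketch (not part of the original file): how a caller holding the
 * RTNL can hand a chain of skbs to rtnl_kfree_skbs() so they are freed only
 * after the lock is dropped in __rtnl_unlock(). demo_defer_free_chain() is a
 * hypothetical name; rtnl_kfree_skbs() and ASSERT_RTNL() are the real
 * interfaces used by this file.
 */
static void demo_defer_free_chain(struct sk_buff *head)
{
	struct sk_buff *tail = head;

	ASSERT_RTNL();

	/* rtnl_kfree_skbs() wants both ends of the ->next chain */
	while (tail && tail->next)
		tail = tail->next;

	if (head)
		rtnl_kfree_skbs(head, tail);
}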
97 
98 void __rtnl_unlock(void)
99 {
100 	struct sk_buff *head = defer_kfree_skb_list;
101 
102 	defer_kfree_skb_list = NULL;
103 
104 	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
105 	 * is used. In some places, e.g. in cfg80211, we have code that will do
106 	 * something like
107 	 *   rtnl_lock()
108 	 *   wiphy_lock()
109 	 *   ...
110 	 *   rtnl_unlock()
111 	 *
112 	 * and because netdev_run_todo() acquires the RTNL for items on the list
113 	 * we could cause a situation such as this:
114 	 * Thread 1			Thread 2
115 	 *				  rtnl_lock()
116 	 *				  unregister_netdevice()
117 	 *				  __rtnl_unlock()
118 	 * rtnl_lock()
119 	 * wiphy_lock()
120 	 * rtnl_unlock()
121 	 *   netdev_run_todo()
122 	 *     __rtnl_unlock()
123 	 *
124 	 *     // list not empty now
125 	 *     // because of thread 2
126 	 *				  rtnl_lock()
127 	 *     while (!list_empty(...))
128 	 *       rtnl_lock()
129 	 *				  wiphy_lock()
130 	 * **** DEADLOCK ****
131 	 *
132 	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
133 	 * it's not used in cases where something is added to the todo list.
134 	 */
135 	WARN_ON(!list_empty(&net_todo_list));
136 
137 	mutex_unlock(&rtnl_mutex);
138 
139 	while (head) {
140 		struct sk_buff *next = head->next;
141 
142 		kfree_skb(head);
143 		cond_resched();
144 		head = next;
145 	}
146 }
147 
148 void rtnl_unlock(void)
149 {
150 	/* This fellow will unlock it for us. */
151 	netdev_run_todo();
152 }
153 EXPORT_SYMBOL(rtnl_unlock);
154 
155 int rtnl_trylock(void)
156 {
157 	return mutex_trylock(&rtnl_mutex);
158 }
159 EXPORT_SYMBOL(rtnl_trylock);
160 
161 int rtnl_is_locked(void)
162 {
163 	return mutex_is_locked(&rtnl_mutex);
164 }
165 EXPORT_SYMBOL(rtnl_is_locked);
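
/* Editor's sketch (not part of the original file): the usual pattern for a
 * short RTNL critical section. dev_set_mtu() is a real helper that must be
 * called with the RTNL held; demo_set_mtu_locked() is a hypothetical name,
 * and the -ERESTARTSYS convention mirrors existing rtnl_lock_killable()
 * callers.
 */
static int demo_set_mtu_locked(struct net_device *dev, int new_mtu)
{
	int err;

	if (rtnl_lock_killable())
		return -ERESTARTSYS;
	err = dev_set_mtu(dev, new_mtu);
	rtnl_unlock();
	return err;
}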
166 
167 bool refcount_dec_and_rtnl_lock(refcount_t *r)
168 {
169 	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
170 }
171 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
172 
173 #ifdef CONFIG_PROVE_LOCKING
174 bool lockdep_rtnl_is_held(void)
175 {
176 	return lockdep_is_held(&rtnl_mutex);
177 }
178 EXPORT_SYMBOL(lockdep_rtnl_is_held);
179 #endif /* #ifdef CONFIG_PROVE_LOCKING */
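
/* Editor's sketch (not part of the original file): lockdep_rtnl_is_held()
 * backs the rtnl_dereference()/rcu_dereference_rtnl() annotations used
 * below. demo_ptr and demo_update() are hypothetical; the access rules
 * they illustrate are the real ones.
 */
static struct rtnl_link __rcu *demo_ptr;

static void demo_update(struct rtnl_link *new_link)
{
	struct rtnl_link *old;

	ASSERT_RTNL();

	/* writer side: RTNL held, so rtnl_dereference() is safe */
	old = rtnl_dereference(demo_ptr);
	rcu_assign_pointer(demo_ptr, new_link);
	if (old)
		kfree_rcu(old, rcu);
}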
180 
181 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
182 
183 static inline int rtm_msgindex(int msgtype)
184 {
185 	int msgindex = msgtype - RTM_BASE;
186 
187 	/*
188 	 * msgindex < 0 implies someone tried to register a netlink
189 	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
190 	 * the message type has not been added to linux/rtnetlink.h
191 	 */
192 	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
193 
194 	return msgindex;
195 }
196 
197 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
198 {
199 	struct rtnl_link __rcu **tab;
200 
201 	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
202 		protocol = PF_UNSPEC;
203 
204 	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
205 	if (!tab)
206 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
207 
208 	return rcu_dereference_rtnl(tab[msgtype]);
209 }
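
/* Editor's sketch (not part of the original file): how the receive path
 * resolves a handler, heavily simplified from rtnetlink_rcv_msg(). The
 * PF_UNSPEC fallback lives inside rtnl_get_link() above. demo_dispatch()
 * is a hypothetical name; note that the real path also pins link->owner
 * with try_module_get() before calling out, which this sketch omits.
 */
static int demo_dispatch(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	int family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	rtnl_doit_func doit = NULL;
	struct rtnl_link *link;

	rcu_read_lock();
	link = rtnl_get_link(family, rtm_msgindex(nlh->nlmsg_type));
	if (link)
		doit = link->doit;
	rcu_read_unlock();

	return doit ? doit(skb, nlh, extack) : -EOPNOTSUPP;
}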
210 
211 static int rtnl_register_internal(struct module *owner,
212 				  int protocol, int msgtype,
213 				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
214 				  unsigned int flags)
215 {
216 	struct rtnl_link *link, *old;
217 	struct rtnl_link __rcu **tab;
218 	int msgindex;
219 	int ret = -ENOBUFS;
220 
221 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
222 	msgindex = rtm_msgindex(msgtype);
223 
224 	rtnl_lock();
225 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
226 	if (tab == NULL) {
227 		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
228 		if (!tab)
229 			goto unlock;
230 
231 		/* ensure readers see the zeroed entries before the table is published */
232 		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
233 	}
234 
235 	old = rtnl_dereference(tab[msgindex]);
236 	if (old) {
237 		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
238 		if (!link)
239 			goto unlock;
240 	} else {
241 		link = kzalloc(sizeof(*link), GFP_KERNEL);
242 		if (!link)
243 			goto unlock;
244 	}
245 
246 	WARN_ON(link->owner && link->owner != owner);
247 	link->owner = owner;
248 
249 	WARN_ON(doit && link->doit && link->doit != doit);
250 	if (doit)
251 		link->doit = doit;
252 	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
253 	if (dumpit)
254 		link->dumpit = dumpit;
255 
256 	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
257 		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
258 	link->flags |= flags;
259 
260 	/* publish protocol:msgtype */
261 	rcu_assign_pointer(tab[msgindex], link);
262 	ret = 0;
263 	if (old)
264 		kfree_rcu(old, rcu);
265 unlock:
266 	rtnl_unlock();
267 	return ret;
268 }
269 
270 /**
271  * rtnl_register_module - Register a rtnetlink message type
272  *
273  * @owner: module registering the hook (THIS_MODULE)
274  * @protocol: Protocol family or PF_UNSPEC
275  * @msgtype: rtnetlink message type
276  * @doit: Function pointer called for each request message
277  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
278  * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
279  *
280  * Like rtnl_register, but for use by removable modules.
281  */
282 int rtnl_register_module(struct module *owner,
283 			 int protocol, int msgtype,
284 			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
285 			 unsigned int flags)
286 {
287 	return rtnl_register_internal(owner, protocol, msgtype,
288 				      doit, dumpit, flags);
289 }
290 EXPORT_SYMBOL_GPL(rtnl_register_module);
291 
292 /**
293  * rtnl_register - Register a rtnetlink message type
294  * @protocol: Protocol family or PF_UNSPEC
295  * @msgtype: rtnetlink message type
296  * @doit: Function pointer called for each request message
297  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
298  * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
299  *
300  * Registers the specified function pointers (at least one of them has
301  * to be non-NULL) to be called whenever a request message for the
302  * specified protocol family and message type is received.
303  *
304  * The special protocol family PF_UNSPEC may be used to define fallback
305  * function pointers for the case when no entry for the specific protocol
306  * family exists.
307  */
308 void rtnl_register(int protocol, int msgtype,
309 		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
310 		   unsigned int flags)
311 {
312 	int err;
313 
314 	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
315 				     flags);
316 	if (err)
317 		pr_err("Unable to register rtnetlink message handler, "
318 		       "protocol = %d, message type = %d\n", protocol, msgtype);
319 }
320 
321 /**
322  * rtnl_unregister - Unregister a rtnetlink message type
323  * @protocol: Protocol family or PF_UNSPEC
324  * @msgtype: rtnetlink message type
325  *
326  * Returns 0 on success or a negative error code.
327  */
328 int rtnl_unregister(int protocol, int msgtype)
329 {
330 	struct rtnl_link __rcu **tab;
331 	struct rtnl_link *link;
332 	int msgindex;
333 
334 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
335 	msgindex = rtm_msgindex(msgtype);
336 
337 	rtnl_lock();
338 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
339 	if (!tab) {
340 		rtnl_unlock();
341 		return -ENOENT;
342 	}
343 
344 	link = rtnl_dereference(tab[msgindex]);
345 	RCU_INIT_POINTER(tab[msgindex], NULL);
346 	rtnl_unlock();
347 
348 	kfree_rcu(link, rcu);
349 
350 	return 0;
351 }
352 EXPORT_SYMBOL_GPL(rtnl_unregister);
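
/* Editor's sketch (not part of the original file): a module pairing
 * rtnl_register_module() with rtnl_unregister() over its lifetime. The
 * protocol/msgtype pair and the handler names are chosen for illustration;
 * the handler signatures match rtnl_doit_func and rtnl_dumpit_func.
 */
static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	return 0;
}

static int demo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;
}

static int __init demo_module_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETLINK,
				    demo_doit, demo_dumpit, 0);
}

static void __exit demo_module_exit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETLINK);
}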
353 
354 /**
355  * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
356  * @protocol: Protocol family or PF_UNSPEC
357  *
358  * Identical to calling rtnl_unregister() for all registered message types
359  * of a certain protocol family.
360  */
361 void rtnl_unregister_all(int protocol)
362 {
363 	struct rtnl_link __rcu **tab;
364 	struct rtnl_link *link;
365 	int msgindex;
366 
367 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
368 
369 	rtnl_lock();
370 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
371 	if (!tab) {
372 		rtnl_unlock();
373 		return;
374 	}
375 	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
376 	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
377 		link = rtnl_dereference(tab[msgindex]);
378 		if (!link)
379 			continue;
380 
381 		RCU_INIT_POINTER(tab[msgindex], NULL);
382 		kfree_rcu(link, rcu);
383 	}
384 	rtnl_unlock();
385 
386 	synchronize_net();
387 
388 	kfree(tab);
389 }
390 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
391 
392 static LIST_HEAD(link_ops);
393 
394 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
395 {
396 	const struct rtnl_link_ops *ops;
397 
398 	list_for_each_entry(ops, &link_ops, list) {
399 		if (!strcmp(ops->kind, kind))
400 			return ops;
401 	}
402 	return NULL;
403 }
404 
405 /**
406  * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
407  * @ops: struct rtnl_link_ops * to register
408  *
409  * The caller must hold the rtnl_mutex. This function should be used
410  * by drivers that create devices during module initialization. It
411  * must be called before registering the devices.
412  *
413  * Returns 0 on success or a negative error code.
414  */
415 int __rtnl_link_register(struct rtnl_link_ops *ops)
416 {
417 	if (rtnl_link_ops_get(ops->kind))
418 		return -EEXIST;
419 
420 	/* Only provide a default ->dellink when the ops can actually
421 	 * create devices, i.e. when ->alloc or ->setup is set. If
422 	 * neither is set, the ops cannot be used to create a device,
423 	 * so leave ->dellink unset too; that disables rtnl_dellink.
424 	 */
425 	if ((ops->alloc || ops->setup) && !ops->dellink)
426 		ops->dellink = unregister_netdevice_queue;
427 
428 	list_add_tail(&ops->list, &link_ops);
429 	return 0;
430 }
431 EXPORT_SYMBOL_GPL(__rtnl_link_register);
432 
433 /**
434  * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
435  * @ops: struct rtnl_link_ops * to register
436  *
437  * Returns 0 on success or a negative error code.
438  */
439 int rtnl_link_register(struct rtnl_link_ops *ops)
440 {
441 	int err;
442 
443 	/* Sanity-check max sizes to avoid stack buffer overflow. */
444 	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
445 		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
446 		return -EINVAL;
447 
448 	rtnl_lock();
449 	err = __rtnl_link_register(ops);
450 	rtnl_unlock();
451 	return err;
452 }
453 EXPORT_SYMBOL_GPL(rtnl_link_register);
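
/* Editor's sketch (not part of the original file): the minimal rtnl_link_ops
 * a virtual-device driver registers so "ip link add ... type demo" can
 * create instances. "demo" and demo_setup() are hypothetical; the dummy
 * driver follows the same pattern around ether_setup().
 */
static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->flags |= IFF_NOARP;
}

static struct rtnl_link_ops demo_link_ops __read_mostly = {
	.kind	= "demo",
	.setup	= demo_setup,
};

static int __init demo_link_init(void)
{
	return rtnl_link_register(&demo_link_ops);
}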
454 
455 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
456 {
457 	struct net_device *dev;
458 	LIST_HEAD(list_kill);
459 
460 	for_each_netdev(net, dev) {
461 		if (dev->rtnl_link_ops == ops)
462 			ops->dellink(dev, &list_kill);
463 	}
464 	unregister_netdevice_many(&list_kill);
465 }
466 
467 /**
468  * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
469  * @ops: struct rtnl_link_ops * to unregister
470  *
471  * The caller must hold the rtnl_mutex and guarantee net_namespace_list
472  * integrity (hold pernet_ops_rwsem for writing to close the race
473  * with setup_net() and cleanup_net()).
474  */
475 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
476 {
477 	struct net *net;
478 
479 	for_each_net(net) {
480 		__rtnl_kill_links(net, ops);
481 	}
482 	list_del(&ops->list);
483 }
484 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
485 
486 /* Return with the rtnl_lock held when there are no network
487  * devices unregistering in any network namespace.
488  */
489 static void rtnl_lock_unregistering_all(void)
490 {
491 	struct net *net;
492 	bool unregistering;
493 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
494 
495 	add_wait_queue(&netdev_unregistering_wq, &wait);
496 	for (;;) {
497 		unregistering = false;
498 		rtnl_lock();
499 		/* We hold pernet_ops_rwsem for writing, so parallel
500 		 * setup_net() and cleanup_net() are not possible.
501 		 */
502 		for_each_net(net) {
503 			if (atomic_read(&net->dev_unreg_count) > 0) {
504 				unregistering = true;
505 				break;
506 			}
507 		}
508 		if (!unregistering)
509 			break;
510 		__rtnl_unlock();
511 
512 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
513 	}
514 	remove_wait_queue(&netdev_unregistering_wq, &wait);
515 }
516 
517 /**
518  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
519  * @ops: struct rtnl_link_ops * to unregister
520  */
521 void rtnl_link_unregister(struct rtnl_link_ops *ops)
522 {
523 	/* Close the race with setup_net() and cleanup_net() */
524 	down_write(&pernet_ops_rwsem);
525 	rtnl_lock_unregistering_all();
526 	__rtnl_link_unregister(ops);
527 	rtnl_unlock();
528 	up_write(&pernet_ops_rwsem);
529 }
530 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
531 
532 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
533 {
534 	struct net_device *master_dev;
535 	const struct rtnl_link_ops *ops;
536 	size_t size = 0;
537 
538 	rcu_read_lock();
539 
540 	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
541 	if (!master_dev)
542 		goto out;
543 
544 	ops = master_dev->rtnl_link_ops;
545 	if (!ops || !ops->get_slave_size)
546 		goto out;
547 	/* IFLA_INFO_SLAVE_DATA + nested data */
548 	size = nla_total_size(sizeof(struct nlattr)) +
549 	       ops->get_slave_size(master_dev, dev);
550 
551 out:
552 	rcu_read_unlock();
553 	return size;
554 }
555 
556 static size_t rtnl_link_get_size(const struct net_device *dev)
557 {
558 	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
559 	size_t size;
560 
561 	if (!ops)
562 		return 0;
563 
564 	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
565 	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
566 
567 	if (ops->get_size)
568 		/* IFLA_INFO_DATA + nested data */
569 		size += nla_total_size(sizeof(struct nlattr)) +
570 			ops->get_size(dev);
571 
572 	if (ops->get_xstats_size)
573 		/* IFLA_INFO_XSTATS */
574 		size += nla_total_size(ops->get_xstats_size(dev));
575 
576 	size += rtnl_link_get_slave_info_data_size(dev);
577 
578 	return size;
579 }
580 
581 static LIST_HEAD(rtnl_af_ops);
582 
583 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
584 {
585 	const struct rtnl_af_ops *ops;
586 
587 	ASSERT_RTNL();
588 
589 	list_for_each_entry(ops, &rtnl_af_ops, list) {
590 		if (ops->family == family)
591 			return ops;
592 	}
593 
594 	return NULL;
595 }
596 
597 /**
598  * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
599  * @ops: struct rtnl_af_ops * to register
600  */
603 void rtnl_af_register(struct rtnl_af_ops *ops)
604 {
605 	rtnl_lock();
606 	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
607 	rtnl_unlock();
608 }
609 EXPORT_SYMBOL_GPL(rtnl_af_register);
610 
611 /**
612  * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
613  * @ops: struct rtnl_af_ops * to unregister
614  */
615 void rtnl_af_unregister(struct rtnl_af_ops *ops)
616 {
617 	rtnl_lock();
618 	list_del_rcu(&ops->list);
619 	rtnl_unlock();
620 
621 	synchronize_rcu();
622 }
623 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
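
/* Editor's sketch (not part of the original file): the shape of an af_ops
 * registration. The callbacks here are hypothetical stubs and the family
 * is chosen for illustration; real users such as IPv6's addrconf fill
 * IFLA_AF_SPEC sub-attributes this way.
 */
static int demo_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	return -ENODATA;	/* nothing to dump; the nest is cancelled */
}

static size_t demo_get_link_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	return 0;
}

static struct rtnl_af_ops demo_af_ops __read_mostly = {
	.family			= AF_PACKET,	/* hypothetical choice */
	.fill_link_af		= demo_fill_link_af,
	.get_link_af_size	= demo_get_link_af_size,
};

static int __init demo_af_init(void)
{
	rtnl_af_register(&demo_af_ops);
	return 0;
}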
624 
625 static size_t rtnl_link_get_af_size(const struct net_device *dev,
626 				    u32 ext_filter_mask)
627 {
628 	struct rtnl_af_ops *af_ops;
629 	size_t size;
630 
631 	/* IFLA_AF_SPEC */
632 	size = nla_total_size(sizeof(struct nlattr));
633 
634 	rcu_read_lock();
635 	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
636 		if (af_ops->get_link_af_size) {
637 			/* AF_* + nested data */
638 			size += nla_total_size(sizeof(struct nlattr)) +
639 				af_ops->get_link_af_size(dev, ext_filter_mask);
640 		}
641 	}
642 	rcu_read_unlock();
643 
644 	return size;
645 }
646 
647 static bool rtnl_have_link_slave_info(const struct net_device *dev)
648 {
649 	struct net_device *master_dev;
650 	bool ret = false;
651 
652 	rcu_read_lock();
653 
654 	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
655 	if (master_dev && master_dev->rtnl_link_ops)
656 		ret = true;
657 	rcu_read_unlock();
658 	return ret;
659 }
660 
661 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
662 				     const struct net_device *dev)
663 {
664 	struct net_device *master_dev;
665 	const struct rtnl_link_ops *ops;
666 	struct nlattr *slave_data;
667 	int err;
668 
669 	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
670 	if (!master_dev)
671 		return 0;
672 	ops = master_dev->rtnl_link_ops;
673 	if (!ops)
674 		return 0;
675 	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
676 		return -EMSGSIZE;
677 	if (ops->fill_slave_info) {
678 		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
679 		if (!slave_data)
680 			return -EMSGSIZE;
681 		err = ops->fill_slave_info(skb, master_dev, dev);
682 		if (err < 0)
683 			goto err_cancel_slave_data;
684 		nla_nest_end(skb, slave_data);
685 	}
686 	return 0;
687 
688 err_cancel_slave_data:
689 	nla_nest_cancel(skb, slave_data);
690 	return err;
691 }
692 
693 static int rtnl_link_info_fill(struct sk_buff *skb,
694 			       const struct net_device *dev)
695 {
696 	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
697 	struct nlattr *data;
698 	int err;
699 
700 	if (!ops)
701 		return 0;
702 	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
703 		return -EMSGSIZE;
704 	if (ops->fill_xstats) {
705 		err = ops->fill_xstats(skb, dev);
706 		if (err < 0)
707 			return err;
708 	}
709 	if (ops->fill_info) {
710 		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
711 		if (data == NULL)
712 			return -EMSGSIZE;
713 		err = ops->fill_info(skb, dev);
714 		if (err < 0)
715 			goto err_cancel_data;
716 		nla_nest_end(skb, data);
717 	}
718 	return 0;
719 
720 err_cancel_data:
721 	nla_nest_cancel(skb, data);
722 	return err;
723 }
724 
725 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
726 {
727 	struct nlattr *linkinfo;
728 	int err = -EMSGSIZE;
729 
730 	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
731 	if (linkinfo == NULL)
732 		goto out;
733 
734 	err = rtnl_link_info_fill(skb, dev);
735 	if (err < 0)
736 		goto err_cancel_link;
737 
738 	err = rtnl_link_slave_info_fill(skb, dev);
739 	if (err < 0)
740 		goto err_cancel_link;
741 
742 	nla_nest_end(skb, linkinfo);
743 	return 0;
744 
745 err_cancel_link:
746 	nla_nest_cancel(skb, linkinfo);
747 out:
748 	return err;
749 }
750 
751 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
752 {
753 	struct sock *rtnl = net->rtnl;
754 
755 	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
756 }
757 
758 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
759 {
760 	struct sock *rtnl = net->rtnl;
761 
762 	return nlmsg_unicast(rtnl, skb, pid);
763 }
764 EXPORT_SYMBOL(rtnl_unicast);
765 
766 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
767 		 const struct nlmsghdr *nlh, gfp_t flags)
768 {
769 	struct sock *rtnl = net->rtnl;
770 
771 	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
772 }
773 EXPORT_SYMBOL(rtnl_notify);
774 
775 void rtnl_set_sk_err(struct net *net, u32 group, int error)
776 {
777 	struct sock *rtnl = net->rtnl;
778 
779 	netlink_set_err(rtnl, 0, group, error);
780 }
781 EXPORT_SYMBOL(rtnl_set_sk_err);
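
/* Editor's sketch (not part of the original file): how these helpers
 * combine to broadcast a link notification, mirroring what the
 * rtmsg_ifinfo() path does later in this file. demo_notify_link() is a
 * hypothetical name.
 */
static void demo_notify_link(struct net_device *dev, struct sk_buff *skb,
			     const struct nlmsghdr *nlh, int err)
{
	struct net *net = dev_net(dev);

	if (err < 0) {
		/* tell RTNLGRP_LINK listeners that a message was lost */
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
		return;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, nlh, GFP_KERNEL);
}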
782 
783 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
784 {
785 	struct nlattr *mx;
786 	int i, valid = 0;
787 
788 	/* nothing is dumped for dst_default_metrics, so just skip the loop */
789 	if (metrics == dst_default_metrics.metrics)
790 		return 0;
791 
792 	mx = nla_nest_start_noflag(skb, RTA_METRICS);
793 	if (mx == NULL)
794 		return -ENOBUFS;
795 
796 	for (i = 0; i < RTAX_MAX; i++) {
797 		if (metrics[i]) {
798 			if (i == RTAX_CC_ALGO - 1) {
799 				char tmp[TCP_CA_NAME_MAX], *name;
800 
801 				name = tcp_ca_get_name_by_key(metrics[i], tmp);
802 				if (!name)
803 					continue;
804 				if (nla_put_string(skb, i + 1, name))
805 					goto nla_put_failure;
806 			} else if (i == RTAX_FEATURES - 1) {
807 				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
808 
809 				if (!user_features)
810 					continue;
811 				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
812 				if (nla_put_u32(skb, i + 1, user_features))
813 					goto nla_put_failure;
814 			} else {
815 				if (nla_put_u32(skb, i + 1, metrics[i]))
816 					goto nla_put_failure;
817 			}
818 			valid++;
819 		}
820 	}
821 
822 	if (!valid) {
823 		nla_nest_cancel(skb, mx);
824 		return 0;
825 	}
826 
827 	return nla_nest_end(skb, mx);
828 
829 nla_put_failure:
830 	nla_nest_cancel(skb, mx);
831 	return -EMSGSIZE;
832 }
833 EXPORT_SYMBOL(rtnetlink_put_metrics);
834 
835 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
836 		       long expires, u32 error)
837 {
838 	struct rta_cacheinfo ci = {
839 		.rta_error = error,
840 		.rta_id =  id,
841 	};
842 
843 	if (dst) {
844 		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
845 		ci.rta_used = dst->__use;
846 		ci.rta_clntref = rcuref_read(&dst->__rcuref);
847 	}
848 	if (expires) {
849 		unsigned long clock;
850 
851 		clock = jiffies_to_clock_t(abs(expires));
852 		clock = min_t(unsigned long, clock, INT_MAX);
853 		ci.rta_expires = (expires > 0) ? clock : -clock;
854 	}
855 	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
856 }
857 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
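
/* Editor's sketch (not part of the original file): the sign convention for
 * @expires. Callers pass a jiffies delta; positive means the entry expires
 * in the future, negative that it already expired. demo_put_cacheinfo()
 * and rt_expires are hypothetical.
 */
static int demo_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
			      unsigned long rt_expires)
{
	/* e.g. 30s from now -> positive delta -> positive rta_expires */
	long expires = (long)(rt_expires - jiffies);

	return rtnl_put_cacheinfo(skb, dst, 0, expires, 0);
}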
858 
859 static void set_operstate(struct net_device *dev, unsigned char transition)
860 {
861 	unsigned char operstate = dev->operstate;
862 
863 	switch (transition) {
864 	case IF_OPER_UP:
865 		if ((operstate == IF_OPER_DORMANT ||
866 		     operstate == IF_OPER_TESTING ||
867 		     operstate == IF_OPER_UNKNOWN) &&
868 		    !netif_dormant(dev) && !netif_testing(dev))
869 			operstate = IF_OPER_UP;
870 		break;
871 
872 	case IF_OPER_TESTING:
873 		if (netif_oper_up(dev))
874 			operstate = IF_OPER_TESTING;
875 		break;
876 
877 	case IF_OPER_DORMANT:
878 		if (netif_oper_up(dev))
879 			operstate = IF_OPER_DORMANT;
880 		break;
881 	}
882 
883 	if (dev->operstate != operstate) {
884 		write_lock(&dev_base_lock);
885 		dev->operstate = operstate;
886 		write_unlock(&dev_base_lock);
887 		netdev_state_change(dev);
888 	}
889 }
890 
891 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
892 {
893 	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
894 	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
895 }
896 
897 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
898 					   const struct ifinfomsg *ifm)
899 {
900 	unsigned int flags = ifm->ifi_flags;
901 
902 	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
903 	if (ifm->ifi_change)
904 		flags = (flags & ifm->ifi_change) |
905 			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
906 
907 	return flags;
908 }
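
/* Editor's sketch (not part of the original file): a worked example of the
 * combine rule above. With ifi_change == IFF_UP, only the UP bit is taken
 * from the request and every other bit keeps the device's current value;
 * with ifi_change == 0 the whole request mask is applied, for compatibility.
 * demo_combine_up_only() is a hypothetical name.
 */
static unsigned int demo_combine_up_only(const struct net_device *dev)
{
	struct ifinfomsg req = {
		.ifi_flags  = IFF_UP,
		.ifi_change = IFF_UP,
	};

	return rtnl_dev_combine_flags(dev, &req);
}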
909 
910 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
911 				 const struct rtnl_link_stats64 *b)
912 {
913 	a->rx_packets = b->rx_packets;
914 	a->tx_packets = b->tx_packets;
915 	a->rx_bytes = b->rx_bytes;
916 	a->tx_bytes = b->tx_bytes;
917 	a->rx_errors = b->rx_errors;
918 	a->tx_errors = b->tx_errors;
919 	a->rx_dropped = b->rx_dropped;
920 	a->tx_dropped = b->tx_dropped;
921 
922 	a->multicast = b->multicast;
923 	a->collisions = b->collisions;
924 
925 	a->rx_length_errors = b->rx_length_errors;
926 	a->rx_over_errors = b->rx_over_errors;
927 	a->rx_crc_errors = b->rx_crc_errors;
928 	a->rx_frame_errors = b->rx_frame_errors;
929 	a->rx_fifo_errors = b->rx_fifo_errors;
930 	a->rx_missed_errors = b->rx_missed_errors;
931 
932 	a->tx_aborted_errors = b->tx_aborted_errors;
933 	a->tx_carrier_errors = b->tx_carrier_errors;
934 	a->tx_fifo_errors = b->tx_fifo_errors;
935 	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
936 	a->tx_window_errors = b->tx_window_errors;
937 
938 	a->rx_compressed = b->rx_compressed;
939 	a->tx_compressed = b->tx_compressed;
940 
941 	a->rx_nohandler = b->rx_nohandler;
942 }
943 
944 /* All VF info */
945 static inline int rtnl_vfinfo_size(const struct net_device *dev,
946 				   u32 ext_filter_mask)
947 {
948 	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
949 		int num_vfs = dev_num_vf(dev->dev.parent);
950 		size_t size = nla_total_size(0);
951 		size += num_vfs *
952 			(nla_total_size(0) +
953 			 nla_total_size(sizeof(struct ifla_vf_mac)) +
954 			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
955 			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
956 			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
957 			 nla_total_size(MAX_VLAN_LIST_LEN *
958 					sizeof(struct ifla_vf_vlan_info)) +
959 			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
960 			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
961 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
962 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
963 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
964 			 nla_total_size(0) + /* nest IFLA_VF_STATS */
965 			 /* IFLA_VF_STATS_RX_PACKETS */
966 			 nla_total_size_64bit(sizeof(__u64)) +
967 			 /* IFLA_VF_STATS_TX_PACKETS */
968 			 nla_total_size_64bit(sizeof(__u64)) +
969 			 /* IFLA_VF_STATS_RX_BYTES */
970 			 nla_total_size_64bit(sizeof(__u64)) +
971 			 /* IFLA_VF_STATS_TX_BYTES */
972 			 nla_total_size_64bit(sizeof(__u64)) +
973 			 /* IFLA_VF_STATS_BROADCAST */
974 			 nla_total_size_64bit(sizeof(__u64)) +
975 			 /* IFLA_VF_STATS_MULTICAST */
976 			 nla_total_size_64bit(sizeof(__u64)) +
977 			 /* IFLA_VF_STATS_RX_DROPPED */
978 			 nla_total_size_64bit(sizeof(__u64)) +
979 			 /* IFLA_VF_STATS_TX_DROPPED */
980 			 nla_total_size_64bit(sizeof(__u64)) +
981 			 nla_total_size(sizeof(struct ifla_vf_trust)));
982 		return size;
983 	} else
984 		return 0;
985 }
986 
987 static size_t rtnl_port_size(const struct net_device *dev,
988 			     u32 ext_filter_mask)
989 {
990 	size_t port_size = nla_total_size(4)		/* PORT_VF */
991 		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
992 		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
993 		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
994 		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
995 		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
996 	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
997 	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
998 		+ port_size;
999 	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
1000 		+ port_size;
1001 
1002 	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1003 	    !(ext_filter_mask & RTEXT_FILTER_VF))
1004 		return 0;
1005 	if (dev_num_vf(dev->dev.parent))
1006 		return port_self_size + vf_ports_size +
1007 			vf_port_size * dev_num_vf(dev->dev.parent);
1008 	else
1009 		return port_self_size;
1010 }
1011 
1012 static size_t rtnl_xdp_size(void)
1013 {
1014 	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
1015 			  nla_total_size(1) +	/* XDP_ATTACHED */
1016 			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
1017 			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */
1018 
1019 	return xdp_size;
1020 }
1021 
1022 static size_t rtnl_prop_list_size(const struct net_device *dev)
1023 {
1024 	struct netdev_name_node *name_node;
1025 	size_t size;
1026 
1027 	if (list_empty(&dev->name_node->list))
1028 		return 0;
1029 	size = nla_total_size(0);
1030 	list_for_each_entry(name_node, &dev->name_node->list, list)
1031 		size += nla_total_size(ALTIFNAMSIZ);
1032 	return size;
1033 }
1034 
1035 static size_t rtnl_proto_down_size(const struct net_device *dev)
1036 {
1037 	size_t size = nla_total_size(1);
1038 
1039 	if (dev->proto_down_reason)
1040 		size += nla_total_size(0) + nla_total_size(4);
1041 
1042 	return size;
1043 }
1044 
1045 static size_t rtnl_devlink_port_size(const struct net_device *dev)
1046 {
1047 	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1048 
1049 	if (dev->devlink_port)
1050 		size += devlink_nl_port_handle_size(dev->devlink_port);
1051 
1052 	return size;
1053 }
1054 
1055 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1056 				     u32 ext_filter_mask)
1057 {
1058 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1059 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1060 	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1061 	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1062 	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1063 	       + nla_total_size(sizeof(struct rtnl_link_stats))
1064 	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1065 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1066 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1067 	       + nla_total_size(4) /* IFLA_TXQLEN */
1068 	       + nla_total_size(4) /* IFLA_WEIGHT */
1069 	       + nla_total_size(4) /* IFLA_MTU */
1070 	       + nla_total_size(4) /* IFLA_LINK */
1071 	       + nla_total_size(4) /* IFLA_MASTER */
1072 	       + nla_total_size(1) /* IFLA_CARRIER */
1073 	       + nla_total_size(4) /* IFLA_PROMISCUITY */
1074 	       + nla_total_size(4) /* IFLA_ALLMULTI */
1075 	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1076 	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1077 	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1078 	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1079 	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1080 	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1081 	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
1082 	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1083 	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1084 	       + nla_total_size(1) /* IFLA_OPERSTATE */
1085 	       + nla_total_size(1) /* IFLA_LINKMODE */
1086 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1087 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
1088 	       + nla_total_size(4) /* IFLA_GROUP */
1089 	       + nla_total_size(ext_filter_mask
1090 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1091 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1092 	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1093 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1094 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1095 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1096 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1097 	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1098 	       + rtnl_xdp_size() /* IFLA_XDP */
1099 	       + nla_total_size(4)  /* IFLA_EVENT */
1100 	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
1101 	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
1102 	       + rtnl_proto_down_size(dev)  /* proto down */
1103 	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
1104 	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
1105 	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
1106 	       + nla_total_size(4)  /* IFLA_MIN_MTU */
1107 	       + nla_total_size(4)  /* IFLA_MAX_MTU */
1108 	       + rtnl_prop_list_size(dev)
1109 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1110 	       + rtnl_devlink_port_size(dev)
1111 	       + 0;
1112 }
1113 
1114 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1115 {
1116 	struct nlattr *vf_ports;
1117 	struct nlattr *vf_port;
1118 	int vf;
1119 	int err;
1120 
1121 	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1122 	if (!vf_ports)
1123 		return -EMSGSIZE;
1124 
1125 	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1126 		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1127 		if (!vf_port)
1128 			goto nla_put_failure;
1129 		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1130 			goto nla_put_failure;
1131 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1132 		if (err == -EMSGSIZE)
1133 			goto nla_put_failure;
1134 		if (err) {
1135 			nla_nest_cancel(skb, vf_port);
1136 			continue;
1137 		}
1138 		nla_nest_end(skb, vf_port);
1139 	}
1140 
1141 	nla_nest_end(skb, vf_ports);
1142 
1143 	return 0;
1144 
1145 nla_put_failure:
1146 	nla_nest_cancel(skb, vf_ports);
1147 	return -EMSGSIZE;
1148 }
1149 
1150 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1151 {
1152 	struct nlattr *port_self;
1153 	int err;
1154 
1155 	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1156 	if (!port_self)
1157 		return -EMSGSIZE;
1158 
1159 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1160 	if (err) {
1161 		nla_nest_cancel(skb, port_self);
1162 		return (err == -EMSGSIZE) ? err : 0;
1163 	}
1164 
1165 	nla_nest_end(skb, port_self);
1166 
1167 	return 0;
1168 }
1169 
1170 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1171 			  u32 ext_filter_mask)
1172 {
1173 	int err;
1174 
1175 	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1176 	    !(ext_filter_mask & RTEXT_FILTER_VF))
1177 		return 0;
1178 
1179 	err = rtnl_port_self_fill(skb, dev);
1180 	if (err)
1181 		return err;
1182 
1183 	if (dev_num_vf(dev->dev.parent)) {
1184 		err = rtnl_vf_ports_fill(skb, dev);
1185 		if (err)
1186 			return err;
1187 	}
1188 
1189 	return 0;
1190 }
1191 
1192 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1193 {
1194 	int err;
1195 	struct netdev_phys_item_id ppid;
1196 
1197 	err = dev_get_phys_port_id(dev, &ppid);
1198 	if (err) {
1199 		if (err == -EOPNOTSUPP)
1200 			return 0;
1201 		return err;
1202 	}
1203 
1204 	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1205 		return -EMSGSIZE;
1206 
1207 	return 0;
1208 }
1209 
1210 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1211 {
1212 	char name[IFNAMSIZ];
1213 	int err;
1214 
1215 	err = dev_get_phys_port_name(dev, name, sizeof(name));
1216 	if (err) {
1217 		if (err == -EOPNOTSUPP)
1218 			return 0;
1219 		return err;
1220 	}
1221 
1222 	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1223 		return -EMSGSIZE;
1224 
1225 	return 0;
1226 }
1227 
1228 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1229 {
1230 	struct netdev_phys_item_id ppid = { };
1231 	int err;
1232 
1233 	err = dev_get_port_parent_id(dev, &ppid, false);
1234 	if (err) {
1235 		if (err == -EOPNOTSUPP)
1236 			return 0;
1237 		return err;
1238 	}
1239 
1240 	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1241 		return -EMSGSIZE;
1242 
1243 	return 0;
1244 }
1245 
1246 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1247 					      struct net_device *dev)
1248 {
1249 	struct rtnl_link_stats64 *sp;
1250 	struct nlattr *attr;
1251 
1252 	attr = nla_reserve_64bit(skb, IFLA_STATS64,
1253 				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1254 	if (!attr)
1255 		return -EMSGSIZE;
1256 
1257 	sp = nla_data(attr);
1258 	dev_get_stats(dev, sp);
1259 
1260 	attr = nla_reserve(skb, IFLA_STATS,
1261 			   sizeof(struct rtnl_link_stats));
1262 	if (!attr)
1263 		return -EMSGSIZE;
1264 
1265 	copy_rtnl_link_stats(nla_data(attr), sp);
1266 
1267 	return 0;
1268 }
1269 
1270 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1271 					       struct net_device *dev,
1272 					       int vfs_num,
1273 					       struct nlattr *vfinfo)
1274 {
1275 	struct ifla_vf_rss_query_en vf_rss_query_en;
1276 	struct nlattr *vf, *vfstats, *vfvlanlist;
1277 	struct ifla_vf_link_state vf_linkstate;
1278 	struct ifla_vf_vlan_info vf_vlan_info;
1279 	struct ifla_vf_spoofchk vf_spoofchk;
1280 	struct ifla_vf_tx_rate vf_tx_rate;
1281 	struct ifla_vf_stats vf_stats;
1282 	struct ifla_vf_trust vf_trust;
1283 	struct ifla_vf_vlan vf_vlan;
1284 	struct ifla_vf_rate vf_rate;
1285 	struct ifla_vf_mac vf_mac;
1286 	struct ifla_vf_broadcast vf_broadcast;
1287 	struct ifla_vf_info ivi;
1288 	struct ifla_vf_guid node_guid;
1289 	struct ifla_vf_guid port_guid;
1290 
1291 	memset(&ivi, 0, sizeof(ivi));
1292 
1293 	/* Not all SR-IOV capable drivers support the
1294 	 * spoofcheck and "RSS query enable" queries.  Preset to
1295 	 * -1 so the user space tool can detect that the driver
1296 	 * didn't report anything.
1297 	 */
1298 	ivi.spoofchk = -1;
1299 	ivi.rss_query_en = -1;
1300 	ivi.trusted = -1;
1301 	/* The default value for VF link state is "auto"
1302 	 * (IFLA_VF_LINK_STATE_AUTO), which equals zero.
1303 	 */
1304 	ivi.linkstate = 0;
1305 	/* VLAN Protocol by default is 802.1Q */
1306 	ivi.vlan_proto = htons(ETH_P_8021Q);
1307 	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1308 		return 0;
1309 
1310 	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1311 	memset(&node_guid, 0, sizeof(node_guid));
1312 	memset(&port_guid, 0, sizeof(port_guid));
1313 
1314 	vf_mac.vf =
1315 		vf_vlan.vf =
1316 		vf_vlan_info.vf =
1317 		vf_rate.vf =
1318 		vf_tx_rate.vf =
1319 		vf_spoofchk.vf =
1320 		vf_linkstate.vf =
1321 		vf_rss_query_en.vf =
1322 		vf_trust.vf =
1323 		node_guid.vf =
1324 		port_guid.vf = ivi.vf;
1325 
1326 	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1327 	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1328 	vf_vlan.vlan = ivi.vlan;
1329 	vf_vlan.qos = ivi.qos;
1330 	vf_vlan_info.vlan = ivi.vlan;
1331 	vf_vlan_info.qos = ivi.qos;
1332 	vf_vlan_info.vlan_proto = ivi.vlan_proto;
1333 	vf_tx_rate.rate = ivi.max_tx_rate;
1334 	vf_rate.min_tx_rate = ivi.min_tx_rate;
1335 	vf_rate.max_tx_rate = ivi.max_tx_rate;
1336 	vf_spoofchk.setting = ivi.spoofchk;
1337 	vf_linkstate.link_state = ivi.linkstate;
1338 	vf_rss_query_en.setting = ivi.rss_query_en;
1339 	vf_trust.setting = ivi.trusted;
1340 	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1341 	if (!vf)
1342 		goto nla_put_vfinfo_failure;
1343 	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1344 	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1345 	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1346 	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1347 		    &vf_rate) ||
1348 	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1349 		    &vf_tx_rate) ||
1350 	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1351 		    &vf_spoofchk) ||
1352 	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1353 		    &vf_linkstate) ||
1354 	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1355 		    sizeof(vf_rss_query_en),
1356 		    &vf_rss_query_en) ||
1357 	    nla_put(skb, IFLA_VF_TRUST,
1358 		    sizeof(vf_trust), &vf_trust))
1359 		goto nla_put_vf_failure;
1360 
1361 	if (dev->netdev_ops->ndo_get_vf_guid &&
1362 	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1363 					      &port_guid)) {
1364 		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1365 			    &node_guid) ||
1366 		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1367 			    &port_guid))
1368 			goto nla_put_vf_failure;
1369 	}
1370 	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1371 	if (!vfvlanlist)
1372 		goto nla_put_vf_failure;
1373 	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1374 		    &vf_vlan_info)) {
1375 		nla_nest_cancel(skb, vfvlanlist);
1376 		goto nla_put_vf_failure;
1377 	}
1378 	nla_nest_end(skb, vfvlanlist);
1379 	memset(&vf_stats, 0, sizeof(vf_stats));
1380 	if (dev->netdev_ops->ndo_get_vf_stats)
1381 		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1382 						&vf_stats);
1383 	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1384 	if (!vfstats)
1385 		goto nla_put_vf_failure;
1386 	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1387 			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1388 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1389 			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1390 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1391 			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1392 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1393 			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1394 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1395 			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1396 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1397 			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1398 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1399 			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1400 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1401 			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1402 		nla_nest_cancel(skb, vfstats);
1403 		goto nla_put_vf_failure;
1404 	}
1405 	nla_nest_end(skb, vfstats);
1406 	nla_nest_end(skb, vf);
1407 	return 0;
1408 
1409 nla_put_vf_failure:
1410 	nla_nest_cancel(skb, vf);
1411 nla_put_vfinfo_failure:
1412 	nla_nest_cancel(skb, vfinfo);
1413 	return -EMSGSIZE;
1414 }
1415 
1416 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1417 					   struct net_device *dev,
1418 					   u32 ext_filter_mask)
1419 {
1420 	struct nlattr *vfinfo;
1421 	int i, num_vfs;
1422 
1423 	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1424 		return 0;
1425 
1426 	num_vfs = dev_num_vf(dev->dev.parent);
1427 	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1428 		return -EMSGSIZE;
1429 
1430 	if (!dev->netdev_ops->ndo_get_vf_config)
1431 		return 0;
1432 
1433 	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1434 	if (!vfinfo)
1435 		return -EMSGSIZE;
1436 
1437 	for (i = 0; i < num_vfs; i++) {
1438 		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1439 			return -EMSGSIZE;
1440 	}
1441 
1442 	nla_nest_end(skb, vfinfo);
1443 	return 0;
1444 }
1445 
1446 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1447 {
1448 	struct rtnl_link_ifmap map;
1449 
1450 	memset(&map, 0, sizeof(map));
1451 	map.mem_start   = dev->mem_start;
1452 	map.mem_end     = dev->mem_end;
1453 	map.base_addr   = dev->base_addr;
1454 	map.irq         = dev->irq;
1455 	map.dma         = dev->dma;
1456 	map.port        = dev->if_port;
1457 
1458 	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1459 		return -EMSGSIZE;
1460 
1461 	return 0;
1462 }
1463 
1464 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1465 {
1466 	const struct bpf_prog *generic_xdp_prog;
1467 
1468 	ASSERT_RTNL();
1469 
1470 	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1471 	if (!generic_xdp_prog)
1472 		return 0;
1473 	return generic_xdp_prog->aux->id;
1474 }
1475 
1476 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1477 {
1478 	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1479 }
1480 
1481 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1482 {
1483 	return dev_xdp_prog_id(dev, XDP_MODE_HW);
1484 }
1485 
1486 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1487 			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1488 			       u32 (*get_prog_id)(struct net_device *dev))
1489 {
1490 	u32 curr_id;
1491 	int err;
1492 
1493 	curr_id = get_prog_id(dev);
1494 	if (!curr_id)
1495 		return 0;
1496 
1497 	*prog_id = curr_id;
1498 	err = nla_put_u32(skb, attr, curr_id);
1499 	if (err)
1500 		return err;
1501 
1502 	if (*mode != XDP_ATTACHED_NONE)
1503 		*mode = XDP_ATTACHED_MULTI;
1504 	else
1505 		*mode = tgt_mode;
1506 
1507 	return 0;
1508 }
1509 
1510 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1511 {
1512 	struct nlattr *xdp;
1513 	u32 prog_id;
1514 	int err;
1515 	u8 mode;
1516 
1517 	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1518 	if (!xdp)
1519 		return -EMSGSIZE;
1520 
1521 	prog_id = 0;
1522 	mode = XDP_ATTACHED_NONE;
1523 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1524 				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1525 	if (err)
1526 		goto err_cancel;
1527 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1528 				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1529 	if (err)
1530 		goto err_cancel;
1531 	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1532 				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1533 	if (err)
1534 		goto err_cancel;
1535 
1536 	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1537 	if (err)
1538 		goto err_cancel;
1539 
1540 	if (prog_id && mode != XDP_ATTACHED_MULTI) {
1541 		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1542 		if (err)
1543 			goto err_cancel;
1544 	}
1545 
1546 	nla_nest_end(skb, xdp);
1547 	return 0;
1548 
1549 err_cancel:
1550 	nla_nest_cancel(skb, xdp);
1551 	return err;
1552 }
1553 
1554 static u32 rtnl_get_event(unsigned long event)
1555 {
1556 	u32 rtnl_event_type = IFLA_EVENT_NONE;
1557 
1558 	switch (event) {
1559 	case NETDEV_REBOOT:
1560 		rtnl_event_type = IFLA_EVENT_REBOOT;
1561 		break;
1562 	case NETDEV_FEAT_CHANGE:
1563 		rtnl_event_type = IFLA_EVENT_FEATURES;
1564 		break;
1565 	case NETDEV_BONDING_FAILOVER:
1566 		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1567 		break;
1568 	case NETDEV_NOTIFY_PEERS:
1569 		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1570 		break;
1571 	case NETDEV_RESEND_IGMP:
1572 		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1573 		break;
1574 	case NETDEV_CHANGEINFODATA:
1575 		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1576 		break;
1577 	default:
1578 		break;
1579 	}
1580 
1581 	return rtnl_event_type;
1582 }
1583 
1584 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1585 {
1586 	const struct net_device *upper_dev;
1587 	int ret = 0;
1588 
1589 	rcu_read_lock();
1590 
1591 	upper_dev = netdev_master_upper_dev_get_rcu(dev);
1592 	if (upper_dev)
1593 		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1594 
1595 	rcu_read_unlock();
1596 	return ret;
1597 }
1598 
1599 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1600 			  bool force)
1601 {
1602 	int ifindex = dev_get_iflink(dev);
1603 
1604 	if (force || dev->ifindex != ifindex)
1605 		return nla_put_u32(skb, IFLA_LINK, ifindex);
1606 
1607 	return 0;
1608 }
1609 
1610 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1611 					      struct net_device *dev)
1612 {
1613 	char buf[IFALIASZ];
1614 	int ret;
1615 
1616 	ret = dev_get_alias(dev, buf, sizeof(buf));
1617 	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1618 }
1619 
1620 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1621 				  const struct net_device *dev,
1622 				  struct net *src_net, gfp_t gfp)
1623 {
1624 	bool put_iflink = false;
1625 
1626 	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1627 		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1628 
1629 		if (!net_eq(dev_net(dev), link_net)) {
1630 			int id = peernet2id_alloc(src_net, link_net, gfp);
1631 
1632 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1633 				return -EMSGSIZE;
1634 
1635 			put_iflink = true;
1636 		}
1637 	}
1638 
1639 	return nla_put_iflink(skb, dev, put_iflink);
1640 }
1641 
1642 static int rtnl_fill_link_af(struct sk_buff *skb,
1643 			     const struct net_device *dev,
1644 			     u32 ext_filter_mask)
1645 {
1646 	const struct rtnl_af_ops *af_ops;
1647 	struct nlattr *af_spec;
1648 
1649 	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1650 	if (!af_spec)
1651 		return -EMSGSIZE;
1652 
1653 	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1654 		struct nlattr *af;
1655 		int err;
1656 
1657 		if (!af_ops->fill_link_af)
1658 			continue;
1659 
1660 		af = nla_nest_start_noflag(skb, af_ops->family);
1661 		if (!af)
1662 			return -EMSGSIZE;
1663 
1664 		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1665 		/*
1666 		 * Caller may return ENODATA to indicate that there
1667 		 * was no data to be dumped. This is not an error, it
1668 		 * means we should trim the attribute header and
1669 		 * continue.
1670 		 */
1671 		if (err == -ENODATA)
1672 			nla_nest_cancel(skb, af);
1673 		else if (err < 0)
1674 			return -EMSGSIZE;
1675 
1676 		nla_nest_end(skb, af);
1677 	}
1678 
1679 	nla_nest_end(skb, af_spec);
1680 	return 0;
1681 }
1682 
1683 static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1684 				 const struct net_device *dev)
1685 {
1686 	struct netdev_name_node *name_node;
1687 	int count = 0;
1688 
1689 	list_for_each_entry(name_node, &dev->name_node->list, list) {
1690 		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1691 			return -EMSGSIZE;
1692 		count++;
1693 	}
1694 	return count;
1695 }
1696 
1697 static int rtnl_fill_prop_list(struct sk_buff *skb,
1698 			       const struct net_device *dev)
1699 {
1700 	struct nlattr *prop_list;
1701 	int ret;
1702 
1703 	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1704 	if (!prop_list)
1705 		return -EMSGSIZE;
1706 
1707 	ret = rtnl_fill_alt_ifnames(skb, dev);
1708 	if (ret <= 0)
1709 		goto nest_cancel;
1710 
1711 	nla_nest_end(skb, prop_list);
1712 	return 0;
1713 
1714 nest_cancel:
1715 	nla_nest_cancel(skb, prop_list);
1716 	return ret;
1717 }
1718 
1719 static int rtnl_fill_proto_down(struct sk_buff *skb,
1720 				const struct net_device *dev)
1721 {
1722 	struct nlattr *pr;
1723 	u32 preason;
1724 
1725 	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1726 		goto nla_put_failure;
1727 
1728 	preason = dev->proto_down_reason;
1729 	if (!preason)
1730 		return 0;
1731 
1732 	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1733 	if (!pr)
1734 		return -EMSGSIZE;
1735 
1736 	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1737 		nla_nest_cancel(skb, pr);
1738 		goto nla_put_failure;
1739 	}
1740 
1741 	nla_nest_end(skb, pr);
1742 	return 0;
1743 
1744 nla_put_failure:
1745 	return -EMSGSIZE;
1746 }
1747 
1748 static int rtnl_fill_devlink_port(struct sk_buff *skb,
1749 				  const struct net_device *dev)
1750 {
1751 	struct nlattr *devlink_port_nest;
1752 	int ret;
1753 
1754 	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1755 	if (!devlink_port_nest)
1756 		return -EMSGSIZE;
1757 
1758 	if (dev->devlink_port) {
1759 		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1760 		if (ret < 0)
1761 			goto nest_cancel;
1762 	}
1763 
1764 	nla_nest_end(skb, devlink_port_nest);
1765 	return 0;
1766 
1767 nest_cancel:
1768 	nla_nest_cancel(skb, devlink_port_nest);
1769 	return ret;
1770 }
1771 
1772 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1773 			    struct net_device *dev, struct net *src_net,
1774 			    int type, u32 pid, u32 seq, u32 change,
1775 			    unsigned int flags, u32 ext_filter_mask,
1776 			    u32 event, int *new_nsid, int new_ifindex,
1777 			    int tgt_netnsid, gfp_t gfp)
1778 {
1779 	struct ifinfomsg *ifm;
1780 	struct nlmsghdr *nlh;
1781 	struct Qdisc *qdisc;
1782 
1783 	ASSERT_RTNL();
1784 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1785 	if (nlh == NULL)
1786 		return -EMSGSIZE;
1787 
1788 	ifm = nlmsg_data(nlh);
1789 	ifm->ifi_family = AF_UNSPEC;
1790 	ifm->__ifi_pad = 0;
1791 	ifm->ifi_type = dev->type;
1792 	ifm->ifi_index = dev->ifindex;
1793 	ifm->ifi_flags = dev_get_flags(dev);
1794 	ifm->ifi_change = change;
1795 
1796 	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1797 		goto nla_put_failure;
1798 
1799 	qdisc = rtnl_dereference(dev->qdisc);
1800 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1801 	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1802 	    nla_put_u8(skb, IFLA_OPERSTATE,
1803 		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1804 	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1805 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1806 	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1807 	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1808 	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1809 	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1810 	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1811 	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1812 	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1813 	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1814 	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1815 	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
1816 	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
1817 	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1818 	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1819 #ifdef CONFIG_RPS
1820 	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1821 #endif
1822 	    put_master_ifindex(skb, dev) ||
1823 	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1824 	    (qdisc &&
1825 	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1826 	    nla_put_ifalias(skb, dev) ||
1827 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1828 			atomic_read(&dev->carrier_up_count) +
1829 			atomic_read(&dev->carrier_down_count)) ||
1830 	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1831 			atomic_read(&dev->carrier_up_count)) ||
1832 	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1833 			atomic_read(&dev->carrier_down_count)))
1834 		goto nla_put_failure;
1835 
1836 	if (rtnl_fill_proto_down(skb, dev))
1837 		goto nla_put_failure;
1838 
1839 	if (event != IFLA_EVENT_NONE) {
1840 		if (nla_put_u32(skb, IFLA_EVENT, event))
1841 			goto nla_put_failure;
1842 	}
1843 
1844 	if (rtnl_fill_link_ifmap(skb, dev))
1845 		goto nla_put_failure;
1846 
1847 	if (dev->addr_len) {
1848 		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1849 		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1850 			goto nla_put_failure;
1851 	}
1852 
1853 	if (rtnl_phys_port_id_fill(skb, dev))
1854 		goto nla_put_failure;
1855 
1856 	if (rtnl_phys_port_name_fill(skb, dev))
1857 		goto nla_put_failure;
1858 
1859 	if (rtnl_phys_switch_id_fill(skb, dev))
1860 		goto nla_put_failure;
1861 
1862 	if (rtnl_fill_stats(skb, dev))
1863 		goto nla_put_failure;
1864 
1865 	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1866 		goto nla_put_failure;
1867 
1868 	if (rtnl_port_fill(skb, dev, ext_filter_mask))
1869 		goto nla_put_failure;
1870 
1871 	if (rtnl_xdp_fill(skb, dev))
1872 		goto nla_put_failure;
1873 
1874 	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1875 		if (rtnl_link_fill(skb, dev) < 0)
1876 			goto nla_put_failure;
1877 	}
1878 
1879 	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1880 		goto nla_put_failure;
1881 
1882 	if (new_nsid &&
1883 	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1884 		goto nla_put_failure;
1885 	if (new_ifindex &&
1886 	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1887 		goto nla_put_failure;
1888 
1889 	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1890 	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1891 		goto nla_put_failure;
1892 
1893 	rcu_read_lock();
1894 	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1895 		goto nla_put_failure_rcu;
1896 	rcu_read_unlock();
1897 
1898 	if (rtnl_fill_prop_list(skb, dev))
1899 		goto nla_put_failure;
1900 
1901 	if (dev->dev.parent &&
1902 	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1903 			   dev_name(dev->dev.parent)))
1904 		goto nla_put_failure;
1905 
1906 	if (dev->dev.parent && dev->dev.parent->bus &&
1907 	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1908 			   dev->dev.parent->bus->name))
1909 		goto nla_put_failure;
1910 
1911 	if (rtnl_fill_devlink_port(skb, dev))
1912 		goto nla_put_failure;
1913 
1914 	nlmsg_end(skb, nlh);
1915 	return 0;
1916 
1917 nla_put_failure_rcu:
1918 	rcu_read_unlock();
1919 nla_put_failure:
1920 	nlmsg_cancel(skb, nlh);
1921 	return -EMSGSIZE;
1922 }
1923 
1924 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1925 	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1926 	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1927 	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1928 	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
1929 	[IFLA_MTU]		= { .type = NLA_U32 },
1930 	[IFLA_LINK]		= { .type = NLA_U32 },
1931 	[IFLA_MASTER]		= { .type = NLA_U32 },
1932 	[IFLA_CARRIER]		= { .type = NLA_U8 },
1933 	[IFLA_TXQLEN]		= { .type = NLA_U32 },
1934 	[IFLA_WEIGHT]		= { .type = NLA_U32 },
1935 	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
1936 	[IFLA_LINKMODE]		= { .type = NLA_U8 },
1937 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
1938 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
1939 	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
1940 	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1941 	 * allow 0-length string (needed to remove an alias).
1942 	 */
1943 	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1944 	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
1945 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
1946 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
1947 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
1948 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
1949 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
1950 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
1951 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
1952 	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
1953 	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
1954 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1955 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
1956 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1957 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
1958 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
1959 	[IFLA_XDP]		= { .type = NLA_NESTED },
1960 	[IFLA_EVENT]		= { .type = NLA_U32 },
1961 	[IFLA_GROUP]		= { .type = NLA_U32 },
1962 	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
1963 	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
1964 	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1965 	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
1966 	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
1967 	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
1968 	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
1969 				    .len = ALTIFNAMSIZ - 1 },
1970 	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
1971 	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1972 	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
1973 	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
1974 	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
1975 	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
1976 	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
1977 	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
1978 	[IFLA_GSO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
1979 	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
1980 };
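
/* Usage sketch (illustrative): the table above drives validation in the
 * nla_parse*() family. A handler for a message with an ifinfomsg header
 * typically does
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
 *				     IFLA_MAX, ifla_policy, extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[IFLA_MTU])
 *		mtu = nla_get_u32(tb[IFLA_MTU]);
 *
 * Entries typed NLA_REJECT (e.g. IFLA_TSO_MAX_SIZE) are dump-only:
 * validation fails if a request supplies them.
 */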
1981 
1982 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1983 	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
1984 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
1985 	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
1986 	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
1987 };
1988 
1989 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1990 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
1991 	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
1992 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
1993 	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
1994 	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
1995 	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
1996 	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
1997 	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
1998 	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
1999 	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
2000 	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
2001 	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
2002 	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
2003 };
2004 
2005 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2006 	[IFLA_PORT_VF]		= { .type = NLA_U32 },
2007 	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
2008 				    .len = PORT_PROFILE_MAX },
2009 	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2010 				      .len = PORT_UUID_MAX },
2011 	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
2012 				    .len = PORT_UUID_MAX },
2013 	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
2014 	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
2015 
2016 	/* Unused, but we need to keep it here since user space could
2017 	 * fill it. It's also broken with regard to NLA_BINARY use in
2018 	 * combination with structs.
2019 	 */
2020 	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
2021 				    .len = sizeof(struct ifla_port_vsi) },
2022 };
2023 
2024 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2025 	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2026 	[IFLA_XDP_FD]		= { .type = NLA_S32 },
2027 	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
2028 	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
2029 	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
2030 	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
2031 };
2032 
2033 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2034 {
2035 	const struct rtnl_link_ops *ops = NULL;
2036 	struct nlattr *linfo[IFLA_INFO_MAX + 1];
2037 
2038 	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2039 		return NULL;
2040 
2041 	if (linfo[IFLA_INFO_KIND]) {
2042 		char kind[MODULE_NAME_LEN];
2043 
2044 		nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2045 		ops = rtnl_link_ops_get(kind);
2046 	}
2047 
2048 	return ops;
2049 }
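
/* For reference, the nesting walked above looks like:
 *
 *	[IFLA_LINKINFO]
 *	    [IFLA_INFO_KIND]	"bridge"  ->  rtnl_link_ops_get("bridge")
 *	    [IFLA_INFO_DATA]	(kind-specific attributes, ignored here)
 *
 * A NULL return means the nest failed to parse or no ops are registered
 * for the kind; callers treat both cases the same way.
 */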
2050 
2051 static bool link_master_filtered(struct net_device *dev, int master_idx)
2052 {
2053 	struct net_device *master;
2054 
2055 	if (!master_idx)
2056 		return false;
2057 
2058 	master = netdev_master_upper_dev_get(dev);
2059 
2060 	/* 0 already denotes that IFLA_MASTER wasn't passed, so another
2061 	 * invalid ifindex value (-1) is needed to denote "no master".
2062 	 */
2063 	if (master_idx == -1)
2064 		return !!master;
2065 
2066 	if (!master || master->ifindex != master_idx)
2067 		return true;
2068 
2069 	return false;
2070 }
2071 
2072 static bool link_kind_filtered(const struct net_device *dev,
2073 			       const struct rtnl_link_ops *kind_ops)
2074 {
2075 	if (kind_ops && dev->rtnl_link_ops != kind_ops)
2076 		return true;
2077 
2078 	return false;
2079 }
2080 
2081 static bool link_dump_filtered(struct net_device *dev,
2082 			       int master_idx,
2083 			       const struct rtnl_link_ops *kind_ops)
2084 {
2085 	if (link_master_filtered(dev, master_idx) ||
2086 	    link_kind_filtered(dev, kind_ops))
2087 		return true;
2088 
2089 	return false;
2090 }
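
/* Filter semantics, illustrated (master_idx comes from IFLA_MASTER;
 * "filtered" means the device is skipped in the dump):
 *
 *	master_idx == 0		no filtering, every device passes
 *	master_idx == -1	only devices without a master pass
 *	master_idx == N > 0	only slaves of ifindex N pass
 *
 * kind_ops, when set, additionally restricts the dump to devices whose
 * rtnl_link_ops match the requested IFLA_INFO_KIND.
 */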
2091 
2092 /**
2093  * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2094  * @sk: netlink socket
2095  * @netnsid: network namespace identifier
2096  *
2097  * Returns the network namespace identified by netnsid on success or an error
2098  * pointer on failure.
2099  */
2100 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2101 {
2102 	struct net *net;
2103 
2104 	net = get_net_ns_by_id(sock_net(sk), netnsid);
2105 	if (!net)
2106 		return ERR_PTR(-EINVAL);
2107 
2108 	/* For now, the caller is required to have CAP_NET_ADMIN in
2109 	 * the user namespace owning the target net ns.
2110 	 */
2111 	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2112 		put_net(net);
2113 		return ERR_PTR(-EACCES);
2114 	}
2115 	return net;
2116 }
2117 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
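
/* Usage sketch: callers translate an IFLA_TARGET_NETNSID attribute into a
 * namespace reference, which they must drop when done:
 *
 *	tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	...
 *	put_net(tgt_net);
 */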
2118 
2119 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2120 				      bool strict_check, struct nlattr **tb,
2121 				      struct netlink_ext_ack *extack)
2122 {
2123 	int hdrlen;
2124 
2125 	if (strict_check) {
2126 		struct ifinfomsg *ifm;
2127 
2128 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2129 			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2130 			return -EINVAL;
2131 		}
2132 
2133 		ifm = nlmsg_data(nlh);
2134 		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2135 		    ifm->ifi_change) {
2136 			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2137 			return -EINVAL;
2138 		}
2139 		if (ifm->ifi_index) {
2140 			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2141 			return -EINVAL;
2142 		}
2143 
2144 		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2145 						     IFLA_MAX, ifla_policy,
2146 						     extack);
2147 	}
2148 
2149 	/* A hack to preserve the kernel<->userspace interface.
2150 	 * The correct header is ifinfomsg, consistent with rtnl_getlink.
2151 	 * However, before Linux v3.9 the code here assumed rtgenmsg, and that's
2152 	 * what iproute2 < v3.9.0 used.
2153 	 * We can detect the old iproute2: even with the IFLA_EXT_MASK attribute
2154 	 * included, its netlink message is shorter than struct ifinfomsg.
2155 	 */
2156 	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2157 		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2158 
2159 	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2160 				      extack);
2161 }
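
/* The header heuristic above, concretely: struct rtgenmsg is 1 byte and
 * struct ifinfomsg is 16 bytes, so a legacy rtgenmsg-based request carries
 * at most NLMSG_ALIGN(1) + 8 == 12 bytes of payload even with an
 * IFLA_EXT_MASK attribute appended, and nlmsg_len(nlh) <
 * sizeof(struct ifinfomsg) identifies it reliably.
 */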
2162 
2163 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2164 {
2165 	struct netlink_ext_ack *extack = cb->extack;
2166 	const struct nlmsghdr *nlh = cb->nlh;
2167 	struct net *net = sock_net(skb->sk);
2168 	struct net *tgt_net = net;
2169 	int h, s_h;
2170 	int idx = 0, s_idx;
2171 	struct net_device *dev;
2172 	struct hlist_head *head;
2173 	struct nlattr *tb[IFLA_MAX+1];
2174 	u32 ext_filter_mask = 0;
2175 	const struct rtnl_link_ops *kind_ops = NULL;
2176 	unsigned int flags = NLM_F_MULTI;
2177 	int master_idx = 0;
2178 	int netnsid = -1;
2179 	int err, i;
2180 
2181 	s_h = cb->args[0];
2182 	s_idx = cb->args[1];
2183 
2184 	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2185 	if (err < 0) {
2186 		if (cb->strict_check)
2187 			return err;
2188 
2189 		goto walk_entries;
2190 	}
2191 
2192 	for (i = 0; i <= IFLA_MAX; ++i) {
2193 		if (!tb[i])
2194 			continue;
2195 
2196 		/* new attributes should only be added with strict checking */
2197 		switch (i) {
2198 		case IFLA_TARGET_NETNSID:
2199 			netnsid = nla_get_s32(tb[i]);
2200 			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2201 			if (IS_ERR(tgt_net)) {
2202 				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2203 				return PTR_ERR(tgt_net);
2204 			}
2205 			break;
2206 		case IFLA_EXT_MASK:
2207 			ext_filter_mask = nla_get_u32(tb[i]);
2208 			break;
2209 		case IFLA_MASTER:
2210 			master_idx = nla_get_u32(tb[i]);
2211 			break;
2212 		case IFLA_LINKINFO:
2213 			kind_ops = linkinfo_to_kind_ops(tb[i]);
2214 			break;
2215 		default:
2216 			if (cb->strict_check) {
2217 				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2218 				return -EINVAL;
2219 			}
2220 		}
2221 	}
2222 
2223 	if (master_idx || kind_ops)
2224 		flags |= NLM_F_DUMP_FILTERED;
2225 
2226 walk_entries:
2227 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2228 		idx = 0;
2229 		head = &tgt_net->dev_index_head[h];
2230 		hlist_for_each_entry(dev, head, index_hlist) {
2231 			if (link_dump_filtered(dev, master_idx, kind_ops))
2232 				goto cont;
2233 			if (idx < s_idx)
2234 				goto cont;
2235 			err = rtnl_fill_ifinfo(skb, dev, net,
2236 					       RTM_NEWLINK,
2237 					       NETLINK_CB(cb->skb).portid,
2238 					       nlh->nlmsg_seq, 0, flags,
2239 					       ext_filter_mask, 0, NULL, 0,
2240 					       netnsid, GFP_KERNEL);
2241 
2242 			if (err < 0) {
2243 				if (likely(skb->len))
2244 					goto out;
2245 
2246 				goto out_err;
2247 			}
2248 cont:
2249 			idx++;
2250 		}
2251 	}
2252 out:
2253 	err = skb->len;
2254 out_err:
2255 	cb->args[1] = idx;
2256 	cb->args[0] = h;
2257 	cb->seq = tgt_net->dev_base_seq;
2258 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2259 	if (netnsid >= 0)
2260 		put_net(tgt_net);
2261 
2262 	return err;
2263 }
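
/* Resumption sketch: when the dump fills one skb, cb->args[0] holds the
 * hash bucket and cb->args[1] the index within it; the next invocation
 * restarts from (s_h, s_idx), skipping idx < s_idx, so userspace sees one
 * continuous NLM_F_MULTI stream terminated by NLMSG_DONE.
 */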
2264 
2265 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2266 			struct netlink_ext_ack *exterr)
2267 {
2268 	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2269 				    exterr);
2270 }
2271 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2272 
2273 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2274 {
2275 	struct net *net;
2276 	/* Examine the link attributes and figure out which
2277 	 * network namespace we are talking about.
2278 	 */
2279 	if (tb[IFLA_NET_NS_PID])
2280 		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2281 	else if (tb[IFLA_NET_NS_FD])
2282 		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2283 	else
2284 		net = get_net(src_net);
2285 	return net;
2286 }
2287 EXPORT_SYMBOL(rtnl_link_get_net);
2288 
2289 /* Figure out which network namespace we are talking about by
2290  * examining the link attributes in the following order:
2291  *
2292  * 1. IFLA_NET_NS_PID
2293  * 2. IFLA_NET_NS_FD
2294  * 3. IFLA_TARGET_NETNSID
2295  */
2296 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2297 					       struct nlattr *tb[])
2298 {
2299 	struct net *net;
2300 
2301 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2302 		return rtnl_link_get_net(src_net, tb);
2303 
2304 	if (!tb[IFLA_TARGET_NETNSID])
2305 		return get_net(src_net);
2306 
2307 	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2308 	if (!net)
2309 		return ERR_PTR(-EINVAL);
2310 
2311 	return net;
2312 }
2313 
2314 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2315 					     struct net *src_net,
2316 					     struct nlattr *tb[], int cap)
2317 {
2318 	struct net *net;
2319 
2320 	net = rtnl_link_get_net_by_nlattr(src_net, tb);
2321 	if (IS_ERR(net))
2322 		return net;
2323 
2324 	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2325 		put_net(net);
2326 		return ERR_PTR(-EPERM);
2327 	}
2328 
2329 	return net;
2330 }
2331 
2332 /* Verify that rtnetlink requests do not pass additional properties
2333  * potentially referring to different network namespaces.
2334  */
2335 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2336 				    struct netlink_ext_ack *extack,
2337 				    bool netns_id_only)
2338 {
2340 	if (netns_id_only) {
2341 		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2342 			return 0;
2343 
2344 		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2345 		return -EOPNOTSUPP;
2346 	}
2347 
2348 	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2349 		goto invalid_attr;
2350 
2351 	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2352 		goto invalid_attr;
2353 
2354 	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2355 		goto invalid_attr;
2356 
2357 	return 0;
2358 
2359 invalid_attr:
2360 	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2361 	return -EINVAL;
2362 }
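
/* Example outcomes: a request carrying both IFLA_NET_NS_FD and
 * IFLA_TARGET_NETNSID fails with -EINVAL even if both resolve to the same
 * namespace; with netns_id_only set (e.g. RTM_DELLINK, RTM_GETLINK), any
 * PID/FD selector at all yields -EOPNOTSUPP.
 */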
2363 
2364 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2365 			     int max_tx_rate)
2366 {
2367 	const struct net_device_ops *ops = dev->netdev_ops;
2368 
2369 	if (!ops->ndo_set_vf_rate)
2370 		return -EOPNOTSUPP;
2371 	if (max_tx_rate && max_tx_rate < min_tx_rate)
2372 		return -EINVAL;
2373 
2374 	return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2375 }
2376 
2377 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2378 			    struct netlink_ext_ack *extack)
2379 {
2380 	if (dev) {
2381 		if (tb[IFLA_ADDRESS] &&
2382 		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2383 			return -EINVAL;
2384 
2385 		if (tb[IFLA_BROADCAST] &&
2386 		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2387 			return -EINVAL;
2388 	}
2389 
2390 	if (tb[IFLA_AF_SPEC]) {
2391 		struct nlattr *af;
2392 		int rem, err;
2393 
2394 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2395 			const struct rtnl_af_ops *af_ops;
2396 
2397 			af_ops = rtnl_af_lookup(nla_type(af));
2398 			if (!af_ops)
2399 				return -EAFNOSUPPORT;
2400 
2401 			if (!af_ops->set_link_af)
2402 				return -EOPNOTSUPP;
2403 
2404 			if (af_ops->validate_link_af) {
2405 				err = af_ops->validate_link_af(dev, af, extack);
2406 				if (err < 0)
2407 					return err;
2408 			}
2409 		}
2410 	}
2411 
2412 	return 0;
2413 }
2414 
2415 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2416 				  int guid_type)
2417 {
2418 	const struct net_device_ops *ops = dev->netdev_ops;
2419 
2420 	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2421 }
2422 
2423 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2424 {
2425 	if (dev->type != ARPHRD_INFINIBAND)
2426 		return -EOPNOTSUPP;
2427 
2428 	return handle_infiniband_guid(dev, ivt, guid_type);
2429 }
2430 
2431 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2432 {
2433 	const struct net_device_ops *ops = dev->netdev_ops;
2434 	int err = -EINVAL;
2435 
2436 	if (tb[IFLA_VF_MAC]) {
2437 		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2438 
2439 		if (ivm->vf >= INT_MAX)
2440 			return -EINVAL;
2441 		err = -EOPNOTSUPP;
2442 		if (ops->ndo_set_vf_mac)
2443 			err = ops->ndo_set_vf_mac(dev, ivm->vf,
2444 						  ivm->mac);
2445 		if (err < 0)
2446 			return err;
2447 	}
2448 
2449 	if (tb[IFLA_VF_VLAN]) {
2450 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2451 
2452 		if (ivv->vf >= INT_MAX)
2453 			return -EINVAL;
2454 		err = -EOPNOTSUPP;
2455 		if (ops->ndo_set_vf_vlan)
2456 			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2457 						   ivv->qos,
2458 						   htons(ETH_P_8021Q));
2459 		if (err < 0)
2460 			return err;
2461 	}
2462 
2463 	if (tb[IFLA_VF_VLAN_LIST]) {
2464 		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2465 		struct nlattr *attr;
2466 		int rem, len = 0;
2467 
2468 		err = -EOPNOTSUPP;
2469 		if (!ops->ndo_set_vf_vlan)
2470 			return err;
2471 
2472 		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2473 			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2474 			    nla_len(attr) < NLA_HDRLEN) {
2475 				return -EINVAL;
2476 			}
2477 			if (len >= MAX_VLAN_LIST_LEN)
2478 				return -EOPNOTSUPP;
2479 			ivvl[len] = nla_data(attr);
2480 
2481 			len++;
2482 		}
2483 		if (len == 0)
2484 			return -EINVAL;
2485 
2486 		if (ivvl[0]->vf >= INT_MAX)
2487 			return -EINVAL;
2488 		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2489 					   ivvl[0]->qos, ivvl[0]->vlan_proto);
2490 		if (err < 0)
2491 			return err;
2492 	}
2493 
2494 	if (tb[IFLA_VF_TX_RATE]) {
2495 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2496 		struct ifla_vf_info ivf;
2497 
2498 		if (ivt->vf >= INT_MAX)
2499 			return -EINVAL;
2500 		err = -EOPNOTSUPP;
2501 		if (ops->ndo_get_vf_config)
2502 			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2503 		if (err < 0)
2504 			return err;
2505 
2506 		err = rtnl_set_vf_rate(dev, ivt->vf,
2507 				       ivf.min_tx_rate, ivt->rate);
2508 		if (err < 0)
2509 			return err;
2510 	}
2511 
2512 	if (tb[IFLA_VF_RATE]) {
2513 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2514 
2515 		if (ivt->vf >= INT_MAX)
2516 			return -EINVAL;
2517 
2518 		err = rtnl_set_vf_rate(dev, ivt->vf,
2519 				       ivt->min_tx_rate, ivt->max_tx_rate);
2520 		if (err < 0)
2521 			return err;
2522 	}
2523 
2524 	if (tb[IFLA_VF_SPOOFCHK]) {
2525 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2526 
2527 		if (ivs->vf >= INT_MAX)
2528 			return -EINVAL;
2529 		err = -EOPNOTSUPP;
2530 		if (ops->ndo_set_vf_spoofchk)
2531 			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2532 						       ivs->setting);
2533 		if (err < 0)
2534 			return err;
2535 	}
2536 
2537 	if (tb[IFLA_VF_LINK_STATE]) {
2538 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2539 
2540 		if (ivl->vf >= INT_MAX)
2541 			return -EINVAL;
2542 		err = -EOPNOTSUPP;
2543 		if (ops->ndo_set_vf_link_state)
2544 			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2545 							 ivl->link_state);
2546 		if (err < 0)
2547 			return err;
2548 	}
2549 
2550 	if (tb[IFLA_VF_RSS_QUERY_EN]) {
2551 		struct ifla_vf_rss_query_en *ivrssq_en;
2552 
2553 		err = -EOPNOTSUPP;
2554 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2555 		if (ivrssq_en->vf >= INT_MAX)
2556 			return -EINVAL;
2557 		if (ops->ndo_set_vf_rss_query_en)
2558 			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2559 							   ivrssq_en->setting);
2560 		if (err < 0)
2561 			return err;
2562 	}
2563 
2564 	if (tb[IFLA_VF_TRUST]) {
2565 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2566 
2567 		if (ivt->vf >= INT_MAX)
2568 			return -EINVAL;
2569 		err = -EOPNOTSUPP;
2570 		if (ops->ndo_set_vf_trust)
2571 			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2572 		if (err < 0)
2573 			return err;
2574 	}
2575 
2576 	if (tb[IFLA_VF_IB_NODE_GUID]) {
2577 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2578 
2579 		if (ivt->vf >= INT_MAX)
2580 			return -EINVAL;
2581 		if (!ops->ndo_set_vf_guid)
2582 			return -EOPNOTSUPP;
2583 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2584 	}
2585 
2586 	if (tb[IFLA_VF_IB_PORT_GUID]) {
2587 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2588 
2589 		if (ivt->vf >= INT_MAX)
2590 			return -EINVAL;
2591 		if (!ops->ndo_set_vf_guid)
2592 			return -EOPNOTSUPP;
2593 
2594 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2595 	}
2596 
2597 	return err;
2598 }
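
/* Note: err starts out as -EINVAL, so an IFLA_VF_INFO nest carrying none
 * of the attributes handled above is rejected rather than silently
 * accepted. The GUID cases return directly, so at most one of them is
 * applied per nest.
 */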
2599 
2600 static int do_set_master(struct net_device *dev, int ifindex,
2601 			 struct netlink_ext_ack *extack)
2602 {
2603 	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2604 	const struct net_device_ops *ops;
2605 	int err;
2606 
2607 	if (upper_dev) {
2608 		if (upper_dev->ifindex == ifindex)
2609 			return 0;
2610 		ops = upper_dev->netdev_ops;
2611 		if (ops->ndo_del_slave) {
2612 			err = ops->ndo_del_slave(upper_dev, dev);
2613 			if (err)
2614 				return err;
2615 		} else {
2616 			return -EOPNOTSUPP;
2617 		}
2618 	}
2619 
2620 	if (ifindex) {
2621 		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2622 		if (!upper_dev)
2623 			return -EINVAL;
2624 		ops = upper_dev->netdev_ops;
2625 		if (ops->ndo_add_slave) {
2626 			err = ops->ndo_add_slave(upper_dev, dev, extack);
2627 			if (err)
2628 				return err;
2629 		} else {
2630 			return -EOPNOTSUPP;
2631 		}
2632 	}
2633 	return 0;
2634 }
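
/* Enslavement sketch: "ip link set dev eth0 master br0" arrives here with
 * ifindex set to br0's index and runs ndo_add_slave(); "... nomaster"
 * arrives with ifindex == 0, so only the ndo_del_slave() branch executes.
 * Re-specifying the current master is a no-op and returns 0 early.
 */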
2635 
2636 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2637 	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
2638 	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
2639 };
2640 
2641 static int do_set_proto_down(struct net_device *dev,
2642 			     struct nlattr *nl_proto_down,
2643 			     struct nlattr *nl_proto_down_reason,
2644 			     struct netlink_ext_ack *extack)
2645 {
2646 	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2647 	unsigned long mask = 0;
2648 	u32 value;
2649 	bool proto_down;
2650 	int err;
2651 
2652 	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2653 		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2654 		return -EOPNOTSUPP;
2655 	}
2656 
2657 	if (nl_proto_down_reason) {
2658 		err = nla_parse_nested_deprecated(pdreason,
2659 						  IFLA_PROTO_DOWN_REASON_MAX,
2660 						  nl_proto_down_reason,
2661 						  ifla_proto_down_reason_policy,
2662 						  NULL);
2663 		if (err < 0)
2664 			return err;
2665 
2666 		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2667 			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2668 			return -EINVAL;
2669 		}
2670 
2671 		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2672 
2673 		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2674 			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2675 
2676 		dev_change_proto_down_reason(dev, mask, value);
2677 	}
2678 
2679 	if (nl_proto_down) {
2680 		proto_down = nla_get_u8(nl_proto_down);
2681 
2682 		/* Don't turn off protodown if there are active reasons */
2683 		if (!proto_down && dev->proto_down_reason) {
2684 			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2685 			return -EBUSY;
2686 		}
2687 		err = dev_change_proto_down(dev,
2688 					    proto_down);
2689 		if (err)
2690 			return err;
2691 	}
2692 
2693 	return 0;
2694 }
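
/* Reason bookkeeping sketch: with a nonzero IFLA_PROTO_DOWN_REASON_MASK,
 * the update performed by dev_change_proto_down_reason() is effectively
 *
 *	dev->proto_down_reason =
 *		(dev->proto_down_reason & ~mask) | (value & mask);
 *
 * while a zero mask replaces the whole reason word. This lets independent
 * subsystems own separate reason bits, and protodown itself cannot be
 * cleared while any reason bit remains set.
 */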
2695 
2696 #define DO_SETLINK_MODIFIED	0x01
2697 /* notify flag means notify + modified. */
2698 #define DO_SETLINK_NOTIFY	0x03
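
/* do_setlink() accumulates these bits in `status`: MODIFIED alone means
 * changes were committed without needing a state-change event, while
 * NOTIFY (which contains the MODIFIED bit) additionally fires
 * netdev_state_change() once on the errout path below.
 */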
2699 static int do_setlink(const struct sk_buff *skb,
2700 		      struct net_device *dev, struct ifinfomsg *ifm,
2701 		      struct netlink_ext_ack *extack,
2702 		      struct nlattr **tb, int status)
2703 {
2704 	const struct net_device_ops *ops = dev->netdev_ops;
2705 	char ifname[IFNAMSIZ];
2706 	int err;
2707 
2708 	err = validate_linkmsg(dev, tb, extack);
2709 	if (err < 0)
2710 		return err;
2711 
2712 	if (tb[IFLA_IFNAME])
2713 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2714 	else
2715 		ifname[0] = '\0';
2716 
2717 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2718 		const char *pat = ifname[0] ? ifname : NULL;
2719 		struct net *net;
2720 		int new_ifindex;
2721 
2722 		net = rtnl_link_get_net_capable(skb, dev_net(dev),
2723 						tb, CAP_NET_ADMIN);
2724 		if (IS_ERR(net)) {
2725 			err = PTR_ERR(net);
2726 			goto errout;
2727 		}
2728 
2729 		if (tb[IFLA_NEW_IFINDEX])
2730 			new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2731 		else
2732 			new_ifindex = 0;
2733 
2734 		err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2735 		put_net(net);
2736 		if (err)
2737 			goto errout;
2738 		status |= DO_SETLINK_MODIFIED;
2739 	}
2740 
2741 	if (tb[IFLA_MAP]) {
2742 		struct rtnl_link_ifmap *u_map;
2743 		struct ifmap k_map;
2744 
2745 		if (!ops->ndo_set_config) {
2746 			err = -EOPNOTSUPP;
2747 			goto errout;
2748 		}
2749 
2750 		if (!netif_device_present(dev)) {
2751 			err = -ENODEV;
2752 			goto errout;
2753 		}
2754 
2755 		u_map = nla_data(tb[IFLA_MAP]);
2756 		k_map.mem_start = (unsigned long) u_map->mem_start;
2757 		k_map.mem_end = (unsigned long) u_map->mem_end;
2758 		k_map.base_addr = (unsigned short) u_map->base_addr;
2759 		k_map.irq = (unsigned char) u_map->irq;
2760 		k_map.dma = (unsigned char) u_map->dma;
2761 		k_map.port = (unsigned char) u_map->port;
2762 
2763 		err = ops->ndo_set_config(dev, &k_map);
2764 		if (err < 0)
2765 			goto errout;
2766 
2767 		status |= DO_SETLINK_NOTIFY;
2768 	}
2769 
2770 	if (tb[IFLA_ADDRESS]) {
2771 		struct sockaddr *sa;
2772 		int len;
2773 
2774 		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2775 						  sizeof(*sa));
2776 		sa = kmalloc(len, GFP_KERNEL);
2777 		if (!sa) {
2778 			err = -ENOMEM;
2779 			goto errout;
2780 		}
2781 		sa->sa_family = dev->type;
2782 		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2783 		       dev->addr_len);
2784 		err = dev_set_mac_address_user(dev, sa, extack);
2785 		kfree(sa);
2786 		if (err)
2787 			goto errout;
2788 		status |= DO_SETLINK_MODIFIED;
2789 	}
2790 
2791 	if (tb[IFLA_MTU]) {
2792 		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2793 		if (err < 0)
2794 			goto errout;
2795 		status |= DO_SETLINK_MODIFIED;
2796 	}
2797 
2798 	if (tb[IFLA_GROUP]) {
2799 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2800 		status |= DO_SETLINK_NOTIFY;
2801 	}
2802 
2803 	/*
2804 	 * If the interface was selected by interface index but an
2805 	 * interface name was also provided, the name attribute is
2806 	 * a rename request.
2807 	 */
2808 	if (ifm->ifi_index > 0 && ifname[0]) {
2809 		err = dev_change_name(dev, ifname);
2810 		if (err < 0)
2811 			goto errout;
2812 		status |= DO_SETLINK_MODIFIED;
2813 	}
2814 
2815 	if (tb[IFLA_IFALIAS]) {
2816 		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2817 				    nla_len(tb[IFLA_IFALIAS]));
2818 		if (err < 0)
2819 			goto errout;
2820 		status |= DO_SETLINK_NOTIFY;
2821 	}
2822 
2823 	if (tb[IFLA_BROADCAST]) {
2824 		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2825 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2826 	}
2827 
2828 	if (tb[IFLA_MASTER]) {
2829 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2830 		if (err)
2831 			goto errout;
2832 		status |= DO_SETLINK_MODIFIED;
2833 	}
2834 
2835 	if (ifm->ifi_flags || ifm->ifi_change) {
2836 		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2837 				       extack);
2838 		if (err < 0)
2839 			goto errout;
2840 	}
2841 
2842 	if (tb[IFLA_CARRIER]) {
2843 		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2844 		if (err)
2845 			goto errout;
2846 		status |= DO_SETLINK_MODIFIED;
2847 	}
2848 
2849 	if (tb[IFLA_TXQLEN]) {
2850 		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2851 
2852 		err = dev_change_tx_queue_len(dev, value);
2853 		if (err)
2854 			goto errout;
2855 		status |= DO_SETLINK_MODIFIED;
2856 	}
2857 
2858 	if (tb[IFLA_GSO_MAX_SIZE]) {
2859 		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2860 
2861 		if (max_size > dev->tso_max_size) {
2862 			err = -EINVAL;
2863 			goto errout;
2864 		}
2865 
2866 		if (dev->gso_max_size ^ max_size) {
2867 			netif_set_gso_max_size(dev, max_size);
2868 			status |= DO_SETLINK_MODIFIED;
2869 		}
2870 	}
2871 
2872 	if (tb[IFLA_GSO_MAX_SEGS]) {
2873 		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2874 
2875 		if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2876 			err = -EINVAL;
2877 			goto errout;
2878 		}
2879 
2880 		if (dev->gso_max_segs ^ max_segs) {
2881 			netif_set_gso_max_segs(dev, max_segs);
2882 			status |= DO_SETLINK_MODIFIED;
2883 		}
2884 	}
2885 
2886 	if (tb[IFLA_GRO_MAX_SIZE]) {
2887 		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2888 
2889 		if (dev->gro_max_size ^ gro_max_size) {
2890 			netif_set_gro_max_size(dev, gro_max_size);
2891 			status |= DO_SETLINK_MODIFIED;
2892 		}
2893 	}
2894 
2895 	if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2896 		u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2897 
2898 		if (max_size > dev->tso_max_size) {
2899 			err = -EINVAL;
2900 			goto errout;
2901 		}
2902 
2903 		if (dev->gso_ipv4_max_size ^ max_size) {
2904 			netif_set_gso_ipv4_max_size(dev, max_size);
2905 			status |= DO_SETLINK_MODIFIED;
2906 		}
2907 	}
2908 
2909 	if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2910 		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2911 
2912 		if (dev->gro_ipv4_max_size ^ gro_max_size) {
2913 			netif_set_gro_ipv4_max_size(dev, gro_max_size);
2914 			status |= DO_SETLINK_MODIFIED;
2915 		}
2916 	}
2917 
2918 	if (tb[IFLA_OPERSTATE])
2919 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2920 
2921 	if (tb[IFLA_LINKMODE]) {
2922 		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2923 
2924 		write_lock(&dev_base_lock);
2925 		if (dev->link_mode ^ value)
2926 			status |= DO_SETLINK_NOTIFY;
2927 		dev->link_mode = value;
2928 		write_unlock(&dev_base_lock);
2929 	}
2930 
2931 	if (tb[IFLA_VFINFO_LIST]) {
2932 		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2933 		struct nlattr *attr;
2934 		int rem;
2935 
2936 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2937 			if (nla_type(attr) != IFLA_VF_INFO ||
2938 			    nla_len(attr) < NLA_HDRLEN) {
2939 				err = -EINVAL;
2940 				goto errout;
2941 			}
2942 			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2943 							  attr,
2944 							  ifla_vf_policy,
2945 							  NULL);
2946 			if (err < 0)
2947 				goto errout;
2948 			err = do_setvfinfo(dev, vfinfo);
2949 			if (err < 0)
2950 				goto errout;
2951 			status |= DO_SETLINK_NOTIFY;
2952 		}
2953 	}
2954 	err = 0;
2955 
2956 	if (tb[IFLA_VF_PORTS]) {
2957 		struct nlattr *port[IFLA_PORT_MAX+1];
2958 		struct nlattr *attr;
2959 		int vf;
2960 		int rem;
2961 
2962 		err = -EOPNOTSUPP;
2963 		if (!ops->ndo_set_vf_port)
2964 			goto errout;
2965 
2966 		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2967 			if (nla_type(attr) != IFLA_VF_PORT ||
2968 			    nla_len(attr) < NLA_HDRLEN) {
2969 				err = -EINVAL;
2970 				goto errout;
2971 			}
2972 			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2973 							  attr,
2974 							  ifla_port_policy,
2975 							  NULL);
2976 			if (err < 0)
2977 				goto errout;
2978 			if (!port[IFLA_PORT_VF]) {
2979 				err = -EOPNOTSUPP;
2980 				goto errout;
2981 			}
2982 			vf = nla_get_u32(port[IFLA_PORT_VF]);
2983 			err = ops->ndo_set_vf_port(dev, vf, port);
2984 			if (err < 0)
2985 				goto errout;
2986 			status |= DO_SETLINK_NOTIFY;
2987 		}
2988 	}
2989 	err = 0;
2990 
2991 	if (tb[IFLA_PORT_SELF]) {
2992 		struct nlattr *port[IFLA_PORT_MAX+1];
2993 
2994 		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2995 						  tb[IFLA_PORT_SELF],
2996 						  ifla_port_policy, NULL);
2997 		if (err < 0)
2998 			goto errout;
2999 
3000 		err = -EOPNOTSUPP;
3001 		if (ops->ndo_set_vf_port)
3002 			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3003 		if (err < 0)
3004 			goto errout;
3005 		status |= DO_SETLINK_NOTIFY;
3006 	}
3007 
3008 	if (tb[IFLA_AF_SPEC]) {
3009 		struct nlattr *af;
3010 		int rem;
3011 
3012 		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3013 			const struct rtnl_af_ops *af_ops;
3014 
3015 			af_ops = rtnl_af_lookup(nla_type(af));
3016 			BUG_ON(!af_ops);
3017 			err = af_ops->set_link_af(dev, af, extack);
3018 			if (err < 0)
3019 				goto errout;
3020 
3021 			status |= DO_SETLINK_NOTIFY;
3022 		}
3023 	}
3024 	err = 0;
3025 
3026 	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3027 		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3028 					tb[IFLA_PROTO_DOWN_REASON], extack);
3029 		if (err)
3030 			goto errout;
3031 		status |= DO_SETLINK_NOTIFY;
3032 	}
3033 
3034 	if (tb[IFLA_XDP]) {
3035 		struct nlattr *xdp[IFLA_XDP_MAX + 1];
3036 		u32 xdp_flags = 0;
3037 
3038 		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3039 						  tb[IFLA_XDP],
3040 						  ifla_xdp_policy, NULL);
3041 		if (err < 0)
3042 			goto errout;
3043 
3044 		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3045 			err = -EINVAL;
3046 			goto errout;
3047 		}
3048 
3049 		if (xdp[IFLA_XDP_FLAGS]) {
3050 			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3051 			if (xdp_flags & ~XDP_FLAGS_MASK) {
3052 				err = -EINVAL;
3053 				goto errout;
3054 			}
3055 			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3056 				err = -EINVAL;
3057 				goto errout;
3058 			}
3059 		}
3060 
3061 		if (xdp[IFLA_XDP_FD]) {
3062 			int expected_fd = -1;
3063 
3064 			if (xdp_flags & XDP_FLAGS_REPLACE) {
3065 				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3066 					err = -EINVAL;
3067 					goto errout;
3068 				}
3069 				expected_fd =
3070 					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3071 			}
3072 
3073 			err = dev_change_xdp_fd(dev, extack,
3074 						nla_get_s32(xdp[IFLA_XDP_FD]),
3075 						expected_fd,
3076 						xdp_flags);
3077 			if (err)
3078 				goto errout;
3079 			status |= DO_SETLINK_NOTIFY;
3080 		}
3081 	}
3082 
3083 errout:
3084 	if (status & DO_SETLINK_MODIFIED) {
3085 		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3086 			netdev_state_change(dev);
3087 
3088 		if (err < 0)
3089 			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3090 					     dev->name);
3091 	}
3092 
3093 	return err;
3094 }
3095 
3096 static struct net_device *rtnl_dev_get(struct net *net,
3097 				       struct nlattr *tb[])
3098 {
3099 	char ifname[ALTIFNAMSIZ];
3100 
3101 	if (tb[IFLA_IFNAME])
3102 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3103 	else if (tb[IFLA_ALT_IFNAME])
3104 		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3105 	else
3106 		return NULL;
3107 
3108 	return __dev_get_by_name(net, ifname);
3109 }
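
/* Lookup sketch: IFLA_IFNAME takes precedence over IFLA_ALT_IFNAME when
 * both are present. __dev_get_by_name() resolves alternative names too, so
 * either attribute may match an altname added via RTM_NEWLINKPROP.
 */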
3110 
3111 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3112 			struct netlink_ext_ack *extack)
3113 {
3114 	struct net *net = sock_net(skb->sk);
3115 	struct ifinfomsg *ifm;
3116 	struct net_device *dev;
3117 	int err;
3118 	struct nlattr *tb[IFLA_MAX+1];
3119 
3120 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3121 				     ifla_policy, extack);
3122 	if (err < 0)
3123 		goto errout;
3124 
3125 	err = rtnl_ensure_unique_netns(tb, extack, false);
3126 	if (err < 0)
3127 		goto errout;
3128 
3129 	err = -EINVAL;
3130 	ifm = nlmsg_data(nlh);
3131 	if (ifm->ifi_index > 0)
3132 		dev = __dev_get_by_index(net, ifm->ifi_index);
3133 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3134 		dev = rtnl_dev_get(net, tb);
3135 	else
3136 		goto errout;
3137 
3138 	if (dev == NULL) {
3139 		err = -ENODEV;
3140 		goto errout;
3141 	}
3142 
3143 	err = do_setlink(skb, dev, ifm, extack, tb, 0);
3144 errout:
3145 	return err;
3146 }
3147 
3148 static int rtnl_group_dellink(const struct net *net, int group)
3149 {
3150 	struct net_device *dev, *aux;
3151 	LIST_HEAD(list_kill);
3152 	bool found = false;
3153 
3154 	if (!group)
3155 		return -EPERM;
3156 
3157 	for_each_netdev(net, dev) {
3158 		if (dev->group == group) {
3159 			const struct rtnl_link_ops *ops;
3160 
3161 			found = true;
3162 			ops = dev->rtnl_link_ops;
3163 			if (!ops || !ops->dellink)
3164 				return -EOPNOTSUPP;
3165 		}
3166 	}
3167 
3168 	if (!found)
3169 		return -ENODEV;
3170 
3171 	for_each_netdev_safe(net, dev, aux) {
3172 		if (dev->group == group) {
3173 			const struct rtnl_link_ops *ops;
3174 
3175 			ops = dev->rtnl_link_ops;
3176 			ops->dellink(dev, &list_kill);
3177 		}
3178 	}
3179 	unregister_netdevice_many(&list_kill);
3180 
3181 	return 0;
3182 }
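
/* Design note: the group is walked twice on purpose. The first pass only
 * verifies that every member is deletable, so a group containing one
 * non-deletable device fails atomically with -EOPNOTSUPP before anything
 * is torn down; the second pass batches all dellink calls into a single
 * unregister_netdevice_many().
 */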
3183 
3184 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3185 {
3186 	const struct rtnl_link_ops *ops;
3187 	LIST_HEAD(list_kill);
3188 
3189 	ops = dev->rtnl_link_ops;
3190 	if (!ops || !ops->dellink)
3191 		return -EOPNOTSUPP;
3192 
3193 	ops->dellink(dev, &list_kill);
3194 	unregister_netdevice_many_notify(&list_kill, portid, nlh);
3195 
3196 	return 0;
3197 }
3198 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3199 
3200 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3201 			struct netlink_ext_ack *extack)
3202 {
3203 	struct net *net = sock_net(skb->sk);
3204 	u32 portid = NETLINK_CB(skb).portid;
3205 	struct net *tgt_net = net;
3206 	struct net_device *dev = NULL;
3207 	struct ifinfomsg *ifm;
3208 	struct nlattr *tb[IFLA_MAX+1];
3209 	int err;
3210 	int netnsid = -1;
3211 
3212 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3213 				     ifla_policy, extack);
3214 	if (err < 0)
3215 		return err;
3216 
3217 	err = rtnl_ensure_unique_netns(tb, extack, true);
3218 	if (err < 0)
3219 		return err;
3220 
3221 	if (tb[IFLA_TARGET_NETNSID]) {
3222 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3223 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3224 		if (IS_ERR(tgt_net))
3225 			return PTR_ERR(tgt_net);
3226 	}
3227 
3228 	err = -EINVAL;
3229 	ifm = nlmsg_data(nlh);
3230 	if (ifm->ifi_index > 0)
3231 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3232 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3233 		dev = rtnl_dev_get(net, tb);
3234 	else if (tb[IFLA_GROUP])
3235 		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3236 	else
3237 		goto out;
3238 
3239 	if (!dev) {
3240 		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3241 			err = -ENODEV;
3242 
3243 		goto out;
3244 	}
3245 
3246 	err = rtnl_delete_link(dev, portid, nlh);
3247 
3248 out:
3249 	if (netnsid >= 0)
3250 		put_net(tgt_net);
3251 
3252 	return err;
3253 }
3254 
3255 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3256 			u32 portid, const struct nlmsghdr *nlh)
3257 {
3258 	unsigned int old_flags;
3259 	int err;
3260 
3261 	old_flags = dev->flags;
3262 	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3263 		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3264 					 NULL);
3265 		if (err < 0)
3266 			return err;
3267 	}
3268 
3269 	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3270 		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3271 	} else {
3272 		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3273 		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3274 	}
3275 	return 0;
3276 }
3277 EXPORT_SYMBOL(rtnl_configure_link);
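
/* Notification sketch: a device still in RTNL_LINK_INITIALIZING gets its
 * initial RTM_NEWLINK with ~0U as the change mask (everything is "new");
 * once INITIALIZED, only the flag bits that actually changed are reported.
 */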
3278 
3279 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3280 				    unsigned char name_assign_type,
3281 				    const struct rtnl_link_ops *ops,
3282 				    struct nlattr *tb[],
3283 				    struct netlink_ext_ack *extack)
3284 {
3285 	struct net_device *dev;
3286 	unsigned int num_tx_queues = 1;
3287 	unsigned int num_rx_queues = 1;
3288 
3289 	if (tb[IFLA_NUM_TX_QUEUES])
3290 		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3291 	else if (ops->get_num_tx_queues)
3292 		num_tx_queues = ops->get_num_tx_queues();
3293 
3294 	if (tb[IFLA_NUM_RX_QUEUES])
3295 		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3296 	else if (ops->get_num_rx_queues)
3297 		num_rx_queues = ops->get_num_rx_queues();
3298 
3299 	if (num_tx_queues < 1 || num_tx_queues > 4096) {
3300 		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3301 		return ERR_PTR(-EINVAL);
3302 	}
3303 
3304 	if (num_rx_queues < 1 || num_rx_queues > 4096) {
3305 		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3306 		return ERR_PTR(-EINVAL);
3307 	}
3308 
3309 	if (ops->alloc) {
3310 		dev = ops->alloc(tb, ifname, name_assign_type,
3311 				 num_tx_queues, num_rx_queues);
3312 		if (IS_ERR(dev))
3313 			return dev;
3314 	} else {
3315 		dev = alloc_netdev_mqs(ops->priv_size, ifname,
3316 				       name_assign_type, ops->setup,
3317 				       num_tx_queues, num_rx_queues);
3318 	}
3319 
3320 	if (!dev)
3321 		return ERR_PTR(-ENOMEM);
3322 
3323 	dev_net_set(dev, net);
3324 	dev->rtnl_link_ops = ops;
3325 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3326 
3327 	if (tb[IFLA_MTU]) {
3328 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3329 		int err;
3330 
3331 		err = dev_validate_mtu(dev, mtu, extack);
3332 		if (err) {
3333 			free_netdev(dev);
3334 			return ERR_PTR(err);
3335 		}
3336 		dev->mtu = mtu;
3337 	}
3338 	if (tb[IFLA_ADDRESS]) {
3339 		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3340 			       nla_len(tb[IFLA_ADDRESS]));
3341 		dev->addr_assign_type = NET_ADDR_SET;
3342 	}
3343 	if (tb[IFLA_BROADCAST])
3344 		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3345 				nla_len(tb[IFLA_BROADCAST]));
3346 	if (tb[IFLA_TXQLEN])
3347 		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3348 	if (tb[IFLA_OPERSTATE])
3349 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3350 	if (tb[IFLA_LINKMODE])
3351 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3352 	if (tb[IFLA_GROUP])
3353 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3354 	if (tb[IFLA_GSO_MAX_SIZE])
3355 		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3356 	if (tb[IFLA_GSO_MAX_SEGS])
3357 		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3358 	if (tb[IFLA_GRO_MAX_SIZE])
3359 		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3360 	if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3361 		netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3362 	if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3363 		netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3364 
3365 	return dev;
3366 }
3367 EXPORT_SYMBOL(rtnl_create_link);
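
/* Usage sketch (my_link_ops is a hypothetical registered rtnl_link_ops
 * with a ->setup callback; the caller holds RTNL and owns the tb[] parse):
 *
 *	dev = rtnl_create_link(net, "foo%d", NET_NAME_ENUM,
 *			       &my_link_ops, tb, extack);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	err = register_netdevice(dev);
 *
 * The device comes back unregistered, in RTNL_LINK_INITIALIZING state;
 * registration and rtnl_configure_link() are the caller's job.
 */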
3368 
3369 static int rtnl_group_changelink(const struct sk_buff *skb,
3370 		struct net *net, int group,
3371 		struct ifinfomsg *ifm,
3372 		struct netlink_ext_ack *extack,
3373 		struct nlattr **tb)
3374 {
3375 	struct net_device *dev, *aux;
3376 	int err;
3377 
3378 	for_each_netdev_safe(net, dev, aux) {
3379 		if (dev->group == group) {
3380 			err = do_setlink(skb, dev, ifm, extack, tb, 0);
3381 			if (err < 0)
3382 				return err;
3383 		}
3384 	}
3385 
3386 	return 0;
3387 }
3388 
3389 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3390 			       const struct rtnl_link_ops *ops,
3391 			       const struct nlmsghdr *nlh,
3392 			       struct nlattr **tb, struct nlattr **data,
3393 			       struct netlink_ext_ack *extack)
3394 {
3395 	unsigned char name_assign_type = NET_NAME_USER;
3396 	struct net *net = sock_net(skb->sk);
3397 	u32 portid = NETLINK_CB(skb).portid;
3398 	struct net *dest_net, *link_net;
3399 	struct net_device *dev;
3400 	char ifname[IFNAMSIZ];
3401 	int err;
3402 
3403 	if (!ops->alloc && !ops->setup)
3404 		return -EOPNOTSUPP;
3405 
3406 	if (tb[IFLA_IFNAME]) {
3407 		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3408 	} else {
3409 		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3410 		name_assign_type = NET_NAME_ENUM;
3411 	}
3412 
3413 	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3414 	if (IS_ERR(dest_net))
3415 		return PTR_ERR(dest_net);
3416 
3417 	if (tb[IFLA_LINK_NETNSID]) {
3418 		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3419 
3420 		link_net = get_net_ns_by_id(dest_net, id);
3421 		if (!link_net) {
3422 			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3423 			err = -EINVAL;
3424 			goto out;
3425 		}
3426 		err = -EPERM;
3427 		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3428 			goto out;
3429 	} else {
3430 		link_net = NULL;
3431 	}
3432 
3433 	dev = rtnl_create_link(link_net ? : dest_net, ifname,
3434 			       name_assign_type, ops, tb, extack);
3435 	if (IS_ERR(dev)) {
3436 		err = PTR_ERR(dev);
3437 		goto out;
3438 	}
3439 
3440 	dev->ifindex = ifm->ifi_index;
3441 
3442 	if (ops->newlink)
3443 		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3444 	else
3445 		err = register_netdevice(dev);
3446 	if (err < 0) {
3447 		free_netdev(dev);
3448 		goto out;
3449 	}
3450 
3451 	err = rtnl_configure_link(dev, ifm, portid, nlh);
3452 	if (err < 0)
3453 		goto out_unregister;
3454 	if (link_net) {
3455 		err = dev_change_net_namespace(dev, dest_net, ifname);
3456 		if (err < 0)
3457 			goto out_unregister;
3458 	}
3459 	if (tb[IFLA_MASTER]) {
3460 		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3461 		if (err)
3462 			goto out_unregister;
3463 	}
3464 out:
3465 	if (link_net)
3466 		put_net(link_net);
3467 	put_net(dest_net);
3468 	return err;
3469 out_unregister:
3470 	if (ops->newlink) {
3471 		LIST_HEAD(list_kill);
3472 
3473 		ops->dellink(dev, &list_kill);
3474 		unregister_netdevice_many(&list_kill);
3475 	} else {
3476 		unregister_netdevice(dev);
3477 	}
3478 	goto out;
3479 }
3480 
3481 struct rtnl_newlink_tbs {
3482 	struct nlattr *tb[IFLA_MAX + 1];
3483 	struct nlattr *attr[RTNL_MAX_TYPE + 1];
3484 	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3485 };
3486 
3487 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3488 			  struct rtnl_newlink_tbs *tbs,
3489 			  struct netlink_ext_ack *extack)
3490 {
3491 	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3492 	struct nlattr ** const tb = tbs->tb;
3493 	const struct rtnl_link_ops *m_ops;
3494 	struct net_device *master_dev;
3495 	struct net *net = sock_net(skb->sk);
3496 	const struct rtnl_link_ops *ops;
3497 	struct nlattr **slave_data;
3498 	char kind[MODULE_NAME_LEN];
3499 	struct net_device *dev;
3500 	struct ifinfomsg *ifm;
3501 	struct nlattr **data;
3502 	bool link_specified;
3503 	int err;
3504 
3505 #ifdef CONFIG_MODULES
3506 replay:
3507 #endif
3508 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3509 				     ifla_policy, extack);
3510 	if (err < 0)
3511 		return err;
3512 
3513 	err = rtnl_ensure_unique_netns(tb, extack, false);
3514 	if (err < 0)
3515 		return err;
3516 
3517 	ifm = nlmsg_data(nlh);
3518 	if (ifm->ifi_index > 0) {
3519 		link_specified = true;
3520 		dev = __dev_get_by_index(net, ifm->ifi_index);
3521 	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3522 		link_specified = true;
3523 		dev = rtnl_dev_get(net, tb);
3524 	} else {
3525 		link_specified = false;
3526 		dev = NULL;
3527 	}
3528 
3529 	master_dev = NULL;
3530 	m_ops = NULL;
3531 	if (dev) {
3532 		master_dev = netdev_master_upper_dev_get(dev);
3533 		if (master_dev)
3534 			m_ops = master_dev->rtnl_link_ops;
3535 	}
3536 
3537 	err = validate_linkmsg(dev, tb, extack);
3538 	if (err < 0)
3539 		return err;
3540 
3541 	if (tb[IFLA_LINKINFO]) {
3542 		err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3543 						  tb[IFLA_LINKINFO],
3544 						  ifla_info_policy, NULL);
3545 		if (err < 0)
3546 			return err;
3547 	} else {
3548 		memset(linkinfo, 0, sizeof(linkinfo));
3549 	}
3550 	if (linkinfo[IFLA_INFO_KIND]) {
3551 		nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3552 		ops = rtnl_link_ops_get(kind);
3553 	} else {
3554 		kind[0] = '\0';
3555 		ops = NULL;
3556 	}
3557 
3558 	data = NULL;
3559 	if (ops) {
3560 		if (ops->maxtype > RTNL_MAX_TYPE)
3561 			return -EINVAL;
3562 
3563 		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3564 			err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3565 							  linkinfo[IFLA_INFO_DATA],
3566 							  ops->policy, extack);
3567 			if (err < 0)
3568 				return err;
3569 			data = tbs->attr;
3570 		}
3571 		if (ops->validate) {
3572 			err = ops->validate(tb, data, extack);
3573 			if (err < 0)
3574 				return err;
3575 		}
3576 	}
3577 
3578 	slave_data = NULL;
3579 	if (m_ops) {
3580 		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3581 			return -EINVAL;
3582 
3583 		if (m_ops->slave_maxtype &&
3584 		    linkinfo[IFLA_INFO_SLAVE_DATA]) {
3585 			err = nla_parse_nested_deprecated(tbs->slave_attr,
3586 							  m_ops->slave_maxtype,
3587 							  linkinfo[IFLA_INFO_SLAVE_DATA],
3588 							  m_ops->slave_policy,
3589 							  extack);
3590 			if (err < 0)
3591 				return err;
3592 			slave_data = tbs->slave_attr;
3593 		}
3594 	}
3595 
3596 	if (dev) {
3597 		int status = 0;
3598 
3599 		if (nlh->nlmsg_flags & NLM_F_EXCL)
3600 			return -EEXIST;
3601 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
3602 			return -EOPNOTSUPP;
3603 
3604 		if (linkinfo[IFLA_INFO_DATA]) {
3605 			if (!ops || ops != dev->rtnl_link_ops ||
3606 			    !ops->changelink)
3607 				return -EOPNOTSUPP;
3608 
3609 			err = ops->changelink(dev, tb, data, extack);
3610 			if (err < 0)
3611 				return err;
3612 			status |= DO_SETLINK_NOTIFY;
3613 		}
3614 
3615 		if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3616 			if (!m_ops || !m_ops->slave_changelink)
3617 				return -EOPNOTSUPP;
3618 
3619 			err = m_ops->slave_changelink(master_dev, dev, tb,
3620 						      slave_data, extack);
3621 			if (err < 0)
3622 				return err;
3623 			status |= DO_SETLINK_NOTIFY;
3624 		}
3625 
3626 		return do_setlink(skb, dev, ifm, extack, tb, status);
3627 	}
3628 
3629 	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3630 		/* No dev found and NLM_F_CREATE not set: the requested device
3631 		 * does not exist, or the request targets a device group.
3632 		 */
3633 		if (link_specified)
3634 			return -ENODEV;
3635 		if (tb[IFLA_GROUP])
3636 			return rtnl_group_changelink(skb, net,
3637 						nla_get_u32(tb[IFLA_GROUP]),
3638 						ifm, extack, tb);
3639 		return -ENODEV;
3640 	}
3641 
3642 	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3643 		return -EOPNOTSUPP;
3644 
3645 	if (!ops) {
3646 #ifdef CONFIG_MODULES
3647 		if (kind[0]) {
3648 			__rtnl_unlock();
3649 			request_module("rtnl-link-%s", kind);
3650 			rtnl_lock();
3651 			ops = rtnl_link_ops_get(kind);
3652 			if (ops)
3653 				goto replay;
3654 		}
3655 #endif
3656 		NL_SET_ERR_MSG(extack, "Unknown device type");
3657 		return -EOPNOTSUPP;
3658 	}
3659 
3660 	return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
3661 }
3662 
3663 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3664 			struct netlink_ext_ack *extack)
3665 {
3666 	struct rtnl_newlink_tbs *tbs;
3667 	int ret;
3668 
3669 	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3670 	if (!tbs)
3671 		return -ENOMEM;
3672 
3673 	ret = __rtnl_newlink(skb, nlh, tbs, extack);
3674 	kfree(tbs);
3675 	return ret;
3676 }
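
/* The parse tables are heap-allocated because struct rtnl_newlink_tbs is
 * three attribute-pointer arrays, roughly 1.3 kB on 64-bit, which is too
 * large to place on the kernel stack in this path.
 */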
3677 
3678 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3679 				  const struct nlmsghdr *nlh,
3680 				  struct nlattr **tb,
3681 				  struct netlink_ext_ack *extack)
3682 {
3683 	struct ifinfomsg *ifm;
3684 	int i, err;
3685 
3686 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3687 		NL_SET_ERR_MSG(extack, "Invalid header for get link");
3688 		return -EINVAL;
3689 	}
3690 
3691 	if (!netlink_strict_get_check(skb))
3692 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3693 					      ifla_policy, extack);
3694 
3695 	ifm = nlmsg_data(nlh);
3696 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3697 	    ifm->ifi_change) {
3698 		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3699 		return -EINVAL;
3700 	}
3701 
3702 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3703 					    ifla_policy, extack);
3704 	if (err)
3705 		return err;
3706 
3707 	for (i = 0; i <= IFLA_MAX; i++) {
3708 		if (!tb[i])
3709 			continue;
3710 
3711 		switch (i) {
3712 		case IFLA_IFNAME:
3713 		case IFLA_ALT_IFNAME:
3714 		case IFLA_EXT_MASK:
3715 		case IFLA_TARGET_NETNSID:
3716 			break;
3717 		default:
3718 			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3719 			return -EINVAL;
3720 		}
3721 	}
3722 
3723 	return 0;
3724 }
3725 
3726 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3727 			struct netlink_ext_ack *extack)
3728 {
3729 	struct net *net = sock_net(skb->sk);
3730 	struct net *tgt_net = net;
3731 	struct ifinfomsg *ifm;
3732 	struct nlattr *tb[IFLA_MAX+1];
3733 	struct net_device *dev = NULL;
3734 	struct sk_buff *nskb;
3735 	int netnsid = -1;
3736 	int err;
3737 	u32 ext_filter_mask = 0;
3738 
3739 	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3740 	if (err < 0)
3741 		return err;
3742 
3743 	err = rtnl_ensure_unique_netns(tb, extack, true);
3744 	if (err < 0)
3745 		return err;
3746 
3747 	if (tb[IFLA_TARGET_NETNSID]) {
3748 		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3749 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3750 		if (IS_ERR(tgt_net))
3751 			return PTR_ERR(tgt_net);
3752 	}
3753 
3754 	if (tb[IFLA_EXT_MASK])
3755 		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3756 
3757 	err = -EINVAL;
3758 	ifm = nlmsg_data(nlh);
3759 	if (ifm->ifi_index > 0)
3760 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3761 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3762 		dev = rtnl_dev_get(tgt_net, tb);
3763 	else
3764 		goto out;
3765 
3766 	err = -ENODEV;
3767 	if (dev == NULL)
3768 		goto out;
3769 
3770 	err = -ENOBUFS;
3771 	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3772 	if (nskb == NULL)
3773 		goto out;
3774 
3775 	err = rtnl_fill_ifinfo(nskb, dev, net,
3776 			       RTM_NEWLINK, NETLINK_CB(skb).portid,
3777 			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3778 			       0, NULL, 0, netnsid, GFP_KERNEL);
3779 	if (err < 0) {
3780 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
3781 		WARN_ON(err == -EMSGSIZE);
3782 		kfree_skb(nskb);
3783 	} else
3784 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3785 out:
3786 	if (netnsid >= 0)
3787 		put_net(tgt_net);
3788 
3789 	return err;
3790 }
3791 
3792 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3793 			   bool *changed, struct netlink_ext_ack *extack)
3794 {
3795 	char *alt_ifname;
3796 	size_t size;
3797 	int err;
3798 
3799 	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3800 	if (err)
3801 		return err;
3802 
3803 	if (cmd == RTM_NEWLINKPROP) {
3804 		size = rtnl_prop_list_size(dev);
3805 		size += nla_total_size(ALTIFNAMSIZ);
3806 		if (size >= U16_MAX) {
3807 			NL_SET_ERR_MSG(extack,
3808 				       "effective property list too long");
3809 			return -EINVAL;
3810 		}
3811 	}
3812 
3813 	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3814 	if (!alt_ifname)
3815 		return -ENOMEM;
3816 
3817 	if (cmd == RTM_NEWLINKPROP) {
3818 		err = netdev_name_node_alt_create(dev, alt_ifname);
3819 		if (!err)
3820 			alt_ifname = NULL;
3821 	} else if (cmd == RTM_DELLINKPROP) {
3822 		err = netdev_name_node_alt_destroy(dev, alt_ifname);
3823 	} else {
3824 		WARN_ON_ONCE(1);
3825 		err = -EINVAL;
3826 	}
3827 
3828 	kfree(alt_ifname);
3829 	if (!err)
3830 		*changed = true;
3831 	return err;
3832 }
3833 
3834 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3835 			 struct netlink_ext_ack *extack)
3836 {
3837 	struct net *net = sock_net(skb->sk);
3838 	struct nlattr *tb[IFLA_MAX + 1];
3839 	struct net_device *dev;
3840 	struct ifinfomsg *ifm;
3841 	bool changed = false;
3842 	struct nlattr *attr;
3843 	int err, rem;
3844 
3845 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3846 	if (err)
3847 		return err;
3848 
3849 	err = rtnl_ensure_unique_netns(tb, extack, true);
3850 	if (err)
3851 		return err;
3852 
3853 	ifm = nlmsg_data(nlh);
3854 	if (ifm->ifi_index > 0)
3855 		dev = __dev_get_by_index(net, ifm->ifi_index);
3856 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3857 		dev = rtnl_dev_get(net, tb);
3858 	else
3859 		return -EINVAL;
3860 
3861 	if (!dev)
3862 		return -ENODEV;
3863 
3864 	if (!tb[IFLA_PROP_LIST])
3865 		return 0;
3866 
3867 	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3868 		switch (nla_type(attr)) {
3869 		case IFLA_ALT_IFNAME:
3870 			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3871 			if (err)
3872 				return err;
3873 			break;
3874 		}
3875 	}
3876 
3877 	if (changed)
3878 		netdev_state_change(dev);
3879 	return 0;
3880 }
3881 
3882 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3883 			    struct netlink_ext_ack *extack)
3884 {
3885 	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3886 }
3887 
3888 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3889 			    struct netlink_ext_ack *extack)
3890 {
3891 	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3892 }
3893 
3894 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3895 {
3896 	struct net *net = sock_net(skb->sk);
3897 	size_t min_ifinfo_dump_size = 0;
3898 	struct nlattr *tb[IFLA_MAX+1];
3899 	u32 ext_filter_mask = 0;
3900 	struct net_device *dev;
3901 	int hdrlen;
3902 
3903 	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3904 	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3905 		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3906 
3907 	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3908 		if (tb[IFLA_EXT_MASK])
3909 			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3910 	}
3911 
3912 	if (!ext_filter_mask)
3913 		return NLMSG_GOODSIZE;
3914 	/*
3915 	 * Traverse the list of net devices and compute the minimum buffer
3916 	 * size that fits the largest per-device message, given the filter mask.
3917 	 */
3918 	rcu_read_lock();
3919 	for_each_netdev_rcu(net, dev) {
3920 		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3921 					   if_nlmsg_size(dev, ext_filter_mask));
3922 	}
3923 	rcu_read_unlock();
3924 
3925 	return nlmsg_total_size(min_ifinfo_dump_size);
3926 }
3927 
3928 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3929 {
3930 	int idx;
3931 	int s_idx = cb->family;
3932 	int type = cb->nlh->nlmsg_type - RTM_BASE;
3933 	int ret = 0;
3934 
3935 	if (s_idx == 0)
3936 		s_idx = 1;
3937 
3938 	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3939 		struct rtnl_link __rcu **tab;
3940 		struct rtnl_link *link;
3941 		rtnl_dumpit_func dumpit;
3942 
3943 		if (idx < s_idx || idx == PF_PACKET)
3944 			continue;
3945 
3946 		if (type < 0 || type >= RTM_NR_MSGTYPES)
3947 			continue;
3948 
3949 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3950 		if (!tab)
3951 			continue;
3952 
3953 		link = rcu_dereference_rtnl(tab[type]);
3954 		if (!link)
3955 			continue;
3956 
3957 		dumpit = link->dumpit;
3958 		if (!dumpit)
3959 			continue;
3960 
3961 		if (idx > s_idx) {
3962 			memset(&cb->args[0], 0, sizeof(cb->args));
3963 			cb->prev_seq = 0;
3964 			cb->seq = 0;
3965 		}
3966 		ret = dumpit(skb, cb);
3967 		if (ret)
3968 			break;
3969 	}
3970 	cb->family = idx;
3971 
3972 	return skb->len ? : ret;
3973 }
3974 
3975 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3976 				       unsigned int change,
3977 				       u32 event, gfp_t flags, int *new_nsid,
3978 				       int new_ifindex, u32 portid,
3979 				       const struct nlmsghdr *nlh)
3980 {
3981 	struct net *net = dev_net(dev);
3982 	struct sk_buff *skb;
3983 	int err = -ENOBUFS;
3984 	u32 seq = 0;
3985 
3986 	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3987 	if (skb == NULL)
3988 		goto errout;
3989 
3990 	if (nlmsg_report(nlh))
3991 		seq = nlmsg_seq(nlh);
3992 	else
3993 		portid = 0;
3994 
3995 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3996 			       type, portid, seq, change, 0, 0, event,
3997 			       new_nsid, new_ifindex, -1, flags);
3998 	if (err < 0) {
3999 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
4000 		WARN_ON(err == -EMSGSIZE);
4001 		kfree_skb(skb);
4002 		goto errout;
4003 	}
4004 	return skb;
4005 errout:
4006 	if (err < 0)
4007 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4008 	return NULL;
4009 }
4010 
4011 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4012 		       u32 portid, const struct nlmsghdr *nlh)
4013 {
4014 	struct net *net = dev_net(dev);
4015 
4016 	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4017 }
4018 
4019 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4020 			       unsigned int change, u32 event,
4021 			       gfp_t flags, int *new_nsid, int new_ifindex,
4022 			       u32 portid, const struct nlmsghdr *nlh)
4023 {
4024 	struct sk_buff *skb;
4025 
4026 	if (dev->reg_state != NETREG_REGISTERED)
4027 		return;
4028 
4029 	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4030 				     new_ifindex, portid, nlh);
4031 	if (skb)
4032 		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4033 }
4034 
4035 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4036 		  gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4037 {
4038 	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4039 			   NULL, 0, portid, nlh);
4040 }
4041 
4042 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4043 			 gfp_t flags, int *new_nsid, int new_ifindex)
4044 {
4045 	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4046 			   new_nsid, new_ifindex, 0, NULL);
4047 }
4048 
4049 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4050 				   struct net_device *dev,
4051 				   u8 *addr, u16 vid, u32 pid, u32 seq,
4052 				   int type, unsigned int flags,
4053 				   int nlflags, u16 ndm_state)
4054 {
4055 	struct nlmsghdr *nlh;
4056 	struct ndmsg *ndm;
4057 
4058 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4059 	if (!nlh)
4060 		return -EMSGSIZE;
4061 
4062 	ndm = nlmsg_data(nlh);
4063 	ndm->ndm_family  = AF_BRIDGE;
4064 	ndm->ndm_pad1	 = 0;
4065 	ndm->ndm_pad2    = 0;
4066 	ndm->ndm_flags	 = flags;
4067 	ndm->ndm_type	 = 0;
4068 	ndm->ndm_ifindex = dev->ifindex;
4069 	ndm->ndm_state   = ndm_state;
4070 
4071 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4072 		goto nla_put_failure;
4073 	if (vid)
4074 		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4075 			goto nla_put_failure;
4076 
4077 	nlmsg_end(skb, nlh);
4078 	return 0;
4079 
4080 nla_put_failure:
4081 	nlmsg_cancel(skb, nlh);
4082 	return -EMSGSIZE;
4083 }
4084 
4085 static inline size_t rtnl_fdb_nlmsg_size(void)
4086 {
4087 	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4088 	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
4089 	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
4090 	       0;
4091 }
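
/*
 * Worked example of the size computed above, assuming the standard 4-byte
 * netlink alignment: NLMSG_ALIGN(sizeof(struct ndmsg)) = 12,
 * nla_total_size(ETH_ALEN) = NLA_ALIGN(NLA_HDRLEN + 6) = 12 and
 * nla_total_size(sizeof(u16)) = NLA_ALIGN(NLA_HDRLEN + 2) = 8, i.e. a
 * 32-byte payload estimate for each FDB notification.
 */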
4092 
4093 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4094 			    u16 ndm_state)
4095 {
4096 	struct net *net = dev_net(dev);
4097 	struct sk_buff *skb;
4098 	int err = -ENOBUFS;
4099 
4100 	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4101 	if (!skb)
4102 		goto errout;
4103 
4104 	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4105 				      0, 0, type, NTF_SELF, 0, ndm_state);
4106 	if (err < 0) {
4107 		kfree_skb(skb);
4108 		goto errout;
4109 	}
4110 
4111 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4112 	return;
4113 errout:
4114 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4115 }
4116 
4117 /*
4118  * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4119  */
4120 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4121 		     struct nlattr *tb[],
4122 		     struct net_device *dev,
4123 		     const unsigned char *addr, u16 vid,
4124 		     u16 flags)
4125 {
4126 	int err = -EINVAL;
4127 
4128 	/* If aging addresses are supported, the device will need to
4129 	 * implement its own handler for this.
4130 	 */
4131 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4132 		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4133 		return err;
4134 	}
4135 
4136 	if (tb[NDA_FLAGS_EXT]) {
4137 		netdev_info(dev, "invalid flags given to default FDB implementation\n");
4138 		return err;
4139 	}
4140 
4141 	if (vid) {
4142 		netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4143 		return err;
4144 	}
4145 
4146 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4147 		err = dev_uc_add_excl(dev, addr);
4148 	else if (is_multicast_ether_addr(addr))
4149 		err = dev_mc_add_excl(dev, addr);
4150 
4151 	/* Only return duplicate errors if NLM_F_EXCL is set */
4152 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
4153 		err = 0;
4154 
4155 	return err;
4156 }
4157 EXPORT_SYMBOL(ndo_dflt_fdb_add);
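
/*
 * Hypothetical driver-side sketch (names are illustrative, not from this
 * file): a device with no FDB handling of its own can simply leave
 * ndo_fdb_add unset and let rtnl_fdb_add() fall back to the default above;
 * a driver that only needs the default semantics plus a hook of its own
 * could wrap it like this.
 *
 *	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *			       struct net_device *dev,
 *			       const unsigned char *addr, u16 vid,
 *			       u16 flags, struct netlink_ext_ack *extack)
 *	{
 *		int err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *
 *		if (!err)
 *			err = foo_sync_filters_to_hw(dev); // hypothetical
 *		return err;
 *	}
 */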
4158 
4159 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4160 			 struct netlink_ext_ack *extack)
4161 {
4162 	u16 vid = 0;
4163 
4164 	if (vlan_attr) {
4165 		if (nla_len(vlan_attr) != sizeof(u16)) {
4166 			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4167 			return -EINVAL;
4168 		}
4169 
4170 		vid = nla_get_u16(vlan_attr);
4171 
4172 		if (!vid || vid >= VLAN_VID_MASK) {
4173 			NL_SET_ERR_MSG(extack, "invalid vlan id");
4174 			return -EINVAL;
4175 		}
4176 	}
4177 	*p_vid = vid;
4178 	return 0;
4179 }
4180 
4181 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4182 			struct netlink_ext_ack *extack)
4183 {
4184 	struct net *net = sock_net(skb->sk);
4185 	struct ndmsg *ndm;
4186 	struct nlattr *tb[NDA_MAX+1];
4187 	struct net_device *dev;
4188 	u8 *addr;
4189 	u16 vid;
4190 	int err;
4191 
4192 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4193 				     extack);
4194 	if (err < 0)
4195 		return err;
4196 
4197 	ndm = nlmsg_data(nlh);
4198 	if (ndm->ndm_ifindex == 0) {
4199 		NL_SET_ERR_MSG(extack, "invalid ifindex");
4200 		return -EINVAL;
4201 	}
4202 
4203 	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4204 	if (dev == NULL) {
4205 		NL_SET_ERR_MSG(extack, "unknown ifindex");
4206 		return -ENODEV;
4207 	}
4208 
4209 	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4210 		NL_SET_ERR_MSG(extack, "invalid address");
4211 		return -EINVAL;
4212 	}
4213 
4214 	if (dev->type != ARPHRD_ETHER) {
4215 		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4216 		return -EINVAL;
4217 	}
4218 
4219 	addr = nla_data(tb[NDA_LLADDR]);
4220 
4221 	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4222 	if (err)
4223 		return err;
4224 
4225 	err = -EOPNOTSUPP;
4226 
4227 	/* Support fdb on the master device, the net/bridge default case */
4228 	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4229 	    netif_is_bridge_port(dev)) {
4230 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4231 		const struct net_device_ops *ops = br_dev->netdev_ops;
4232 
4233 		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4234 				       nlh->nlmsg_flags, extack);
4235 		if (err)
4236 			goto out;
4237 		else
4238 			ndm->ndm_flags &= ~NTF_MASTER;
4239 	}
4240 
4241 	/* Embedded bridge, macvlan, and any other device support */
4242 	if ((ndm->ndm_flags & NTF_SELF)) {
4243 		if (dev->netdev_ops->ndo_fdb_add)
4244 			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4245 							   vid,
4246 							   nlh->nlmsg_flags,
4247 							   extack);
4248 		else
4249 			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4250 					       nlh->nlmsg_flags);
4251 
4252 		if (!err) {
4253 			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4254 					ndm->ndm_state);
4255 			ndm->ndm_flags &= ~NTF_SELF;
4256 		}
4257 	}
4258 out:
4259 	return err;
4260 }
4261 
4262 /*
4263  * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4264  */
4265 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4266 		     struct nlattr *tb[],
4267 		     struct net_device *dev,
4268 		     const unsigned char *addr, u16 vid)
4269 {
4270 	int err = -EINVAL;
4271 
4272 	/* If aging addresses are supported, the device will need to
4273 	 * implement its own handler for this.
4274 	 */
4275 	if (!(ndm->ndm_state & NUD_PERMANENT)) {
4276 		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4277 		return err;
4278 	}
4279 
4280 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4281 		err = dev_uc_del(dev, addr);
4282 	else if (is_multicast_ether_addr(addr))
4283 		err = dev_mc_del(dev, addr);
4284 
4285 	return err;
4286 }
4287 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4288 
4289 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4290 	[NDA_VLAN]	= { .type = NLA_U16 },
4291 	[NDA_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
4292 	[NDA_NDM_STATE_MASK]	= { .type = NLA_U16  },
4293 	[NDA_NDM_FLAGS_MASK]	= { .type = NLA_U8 },
4294 };
4295 
4296 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4297 			struct netlink_ext_ack *extack)
4298 {
4299 	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4300 	struct net *net = sock_net(skb->sk);
4301 	const struct net_device_ops *ops;
4302 	struct ndmsg *ndm;
4303 	struct nlattr *tb[NDA_MAX+1];
4304 	struct net_device *dev;
4305 	__u8 *addr = NULL;
4306 	int err;
4307 	u16 vid;
4308 
4309 	if (!netlink_capable(skb, CAP_NET_ADMIN))
4310 		return -EPERM;
4311 
4312 	if (!del_bulk) {
4313 		err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4314 					     NULL, extack);
4315 	} else {
4316 		err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4317 				  fdb_del_bulk_policy, extack);
4318 	}
4319 	if (err < 0)
4320 		return err;
4321 
4322 	ndm = nlmsg_data(nlh);
4323 	if (ndm->ndm_ifindex == 0) {
4324 		NL_SET_ERR_MSG(extack, "invalid ifindex");
4325 		return -EINVAL;
4326 	}
4327 
4328 	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4329 	if (dev == NULL) {
4330 		NL_SET_ERR_MSG(extack, "unknown ifindex");
4331 		return -ENODEV;
4332 	}
4333 
4334 	if (!del_bulk) {
4335 		if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4336 			NL_SET_ERR_MSG(extack, "invalid address");
4337 			return -EINVAL;
4338 		}
4339 		addr = nla_data(tb[NDA_LLADDR]);
4340 	}
4341 
4342 	if (dev->type != ARPHRD_ETHER) {
4343 		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4344 		return -EINVAL;
4345 	}
4346 
4347 	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4348 	if (err)
4349 		return err;
4350 
4351 	err = -EOPNOTSUPP;
4352 
4353 	/* Support fdb on the master device, the net/bridge default case */
4354 	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4355 	    netif_is_bridge_port(dev)) {
4356 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4357 
4358 		ops = br_dev->netdev_ops;
4359 		if (!del_bulk) {
4360 			if (ops->ndo_fdb_del)
4361 				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4362 		} else {
4363 			if (ops->ndo_fdb_del_bulk)
4364 				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4365 							    extack);
4366 		}
4367 
4368 		if (err)
4369 			goto out;
4370 		else
4371 			ndm->ndm_flags &= ~NTF_MASTER;
4372 	}
4373 
4374 	/* Embedded bridge, macvlan, and any other device support */
4375 	if (ndm->ndm_flags & NTF_SELF) {
4376 		ops = dev->netdev_ops;
4377 		if (!del_bulk) {
4378 			if (ops->ndo_fdb_del)
4379 				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4380 			else
4381 				err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4382 		} else {
4383 			/* in case err was cleared by NTF_MASTER call */
4384 			err = -EOPNOTSUPP;
4385 			if (ops->ndo_fdb_del_bulk)
4386 				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4387 							    extack);
4388 		}
4389 
4390 		if (!err) {
4391 			if (!del_bulk)
4392 				rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4393 						ndm->ndm_state);
4394 			ndm->ndm_flags &= ~NTF_SELF;
4395 		}
4396 	}
4397 out:
4398 	return err;
4399 }
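
/*
 * Illustrative userspace sketch of the NLM_F_BULK path above: a bulk
 * delete carries no NDA_LLADDR, and matching is narrowed only by the
 * header and the attributes admitted by fdb_del_bulk_policy (NDA_VLAN,
 * NDA_IFINDEX, NDA_NDM_STATE_MASK, NDA_NDM_FLAGS_MASK).  The ifindex
 * below is a placeholder.
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ndmsg	ndm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct ndmsg)),
 *		.nlh.nlmsg_type	 = RTM_DELNEIGH,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_BULK,
 *		.ndm.ndm_family	 = AF_BRIDGE,
 *		.ndm.ndm_ifindex = 4,
 *		.ndm.ndm_flags	 = NTF_SELF,
 *	};
 */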
4400 
4401 static int nlmsg_populate_fdb(struct sk_buff *skb,
4402 			      struct netlink_callback *cb,
4403 			      struct net_device *dev,
4404 			      int *idx,
4405 			      struct netdev_hw_addr_list *list)
4406 {
4407 	struct netdev_hw_addr *ha;
4408 	int err;
4409 	u32 portid, seq;
4410 
4411 	portid = NETLINK_CB(cb->skb).portid;
4412 	seq = cb->nlh->nlmsg_seq;
4413 
4414 	list_for_each_entry(ha, &list->list, list) {
4415 		if (*idx < cb->args[2])
4416 			goto skip;
4417 
4418 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4419 					      portid, seq,
4420 					      RTM_NEWNEIGH, NTF_SELF,
4421 					      NLM_F_MULTI, NUD_PERMANENT);
4422 		if (err < 0)
4423 			return err;
4424 skip:
4425 		*idx += 1;
4426 	}
4427 	return 0;
4428 }
4429 
4430 /**
4431  * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4432  * @skb: socket buffer to store message in
4433  * @cb: netlink callback
4434  * @dev: netdevice
4435  * @filter_dev: ignored
4436  * @idx: the number of FDB table entries dumped is added to *@idx
4437  *
4438  * Default netdevice operation to dump the existing unicast address list.
4439  * Default netdevice operation to dump the existing unicast and multicast
4440  * address lists.  Returns 0 on success or a negative error code.
4441 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4442 		      struct netlink_callback *cb,
4443 		      struct net_device *dev,
4444 		      struct net_device *filter_dev,
4445 		      int *idx)
4446 {
4447 	int err;
4448 
4449 	if (dev->type != ARPHRD_ETHER)
4450 		return -EINVAL;
4451 
4452 	netif_addr_lock_bh(dev);
4453 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4454 	if (err)
4455 		goto out;
4456 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4457 out:
4458 	netif_addr_unlock_bh(dev);
4459 	return err;
4460 }
4461 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
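
/*
 * Hypothetical usage sketch: a driver whose filtering lives entirely in
 * dev->uc/dev->mc can point its ops at the default dump directly, e.g.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};
 *
 * rtnl_fdb_dump() below also falls back to ndo_dflt_fdb_dump() by itself
 * when a device provides no ndo_fdb_dump.
 */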
4462 
4463 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4464 				 int *br_idx, int *brport_idx,
4465 				 struct netlink_ext_ack *extack)
4466 {
4467 	struct nlattr *tb[NDA_MAX + 1];
4468 	struct ndmsg *ndm;
4469 	int err, i;
4470 
4471 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4472 		NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4473 		return -EINVAL;
4474 	}
4475 
4476 	ndm = nlmsg_data(nlh);
4477 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4478 	    ndm->ndm_flags || ndm->ndm_type) {
4479 		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4480 		return -EINVAL;
4481 	}
4482 
4483 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4484 					    NDA_MAX, NULL, extack);
4485 	if (err < 0)
4486 		return err;
4487 
4488 	*brport_idx = ndm->ndm_ifindex;
4489 	for (i = 0; i <= NDA_MAX; ++i) {
4490 		if (!tb[i])
4491 			continue;
4492 
4493 		switch (i) {
4494 		case NDA_IFINDEX:
4495 			if (nla_len(tb[i]) != sizeof(u32)) {
4496 				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4497 				return -EINVAL;
4498 			}
4499 			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4500 			break;
4501 		case NDA_MASTER:
4502 			if (nla_len(tb[i]) != sizeof(u32)) {
4503 				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4504 				return -EINVAL;
4505 			}
4506 			*br_idx = nla_get_u32(tb[NDA_MASTER]);
4507 			break;
4508 		default:
4509 			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4510 			return -EINVAL;
4511 		}
4512 	}
4513 
4514 	return 0;
4515 }
4516 
4517 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4518 				 int *br_idx, int *brport_idx,
4519 				 struct netlink_ext_ack *extack)
4520 {
4521 	struct nlattr *tb[IFLA_MAX+1];
4522 	int err;
4523 
4524 	/* A hack to preserve kernel<->userspace interface.
4525 	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4526 	 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4527 	 * So, check for ndmsg with an optional u32 attribute (not used here).
4528 	 * Fortunately these sizes don't conflict with the size of ifinfomsg
4529 	 * with an optional attribute.
4530 	 */
4531 	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4532 	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4533 	     nla_attr_size(sizeof(u32)))) {
4534 		struct ifinfomsg *ifm;
4535 
4536 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4537 					     tb, IFLA_MAX, ifla_policy,
4538 					     extack);
4539 		if (err < 0) {
4540 			return -EINVAL;
4541 		} else if (err == 0) {
4542 			if (tb[IFLA_MASTER])
4543 				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
4544 		}
4545 
4546 		ifm = nlmsg_data(nlh);
4547 		*brport_idx = ifm->ifi_index;
4548 	}
4549 	return 0;
4550 }
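
/*
 * Concrete numbers behind the disambiguation above (fixed netlink ABI
 * layouts): sizeof(struct ndmsg) == 12, and 12 + nla_attr_size(4) == 20
 * with the optional u32 attribute, whereas sizeof(struct ifinfomsg) == 16
 * (24 with one such attribute), so legacy ndmsg payload lengths can never
 * collide with ifinfomsg-based ones.
 */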
4551 
4552 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4553 {
4554 	struct net_device *dev;
4555 	struct net_device *br_dev = NULL;
4556 	const struct net_device_ops *ops = NULL;
4557 	const struct net_device_ops *cops = NULL;
4558 	struct net *net = sock_net(skb->sk);
4559 	struct hlist_head *head;
4560 	int brport_idx = 0;
4561 	int br_idx = 0;
4562 	int h, s_h;
4563 	int idx = 0, s_idx;
4564 	int err = 0;
4565 	int fidx = 0;
4566 
4567 	if (cb->strict_check)
4568 		err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4569 					    cb->extack);
4570 	else
4571 		err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4572 					    cb->extack);
4573 	if (err < 0)
4574 		return err;
4575 
4576 	if (br_idx) {
4577 		br_dev = __dev_get_by_index(net, br_idx);
4578 		if (!br_dev)
4579 			return -ENODEV;
4580 
4581 		ops = br_dev->netdev_ops;
4582 	}
4583 
4584 	s_h = cb->args[0];
4585 	s_idx = cb->args[1];
4586 
4587 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4588 		idx = 0;
4589 		head = &net->dev_index_head[h];
4590 		hlist_for_each_entry(dev, head, index_hlist) {
4591 
4592 			if (brport_idx && (dev->ifindex != brport_idx))
4593 				continue;
4594 
4595 			if (!br_idx) { /* user did not specify a particular bridge */
4596 				if (netif_is_bridge_port(dev)) {
4597 					br_dev = netdev_master_upper_dev_get(dev);
4598 					cops = br_dev->netdev_ops;
4599 				}
4600 			} else {
4601 				if (dev != br_dev &&
4602 				    !netif_is_bridge_port(dev))
4603 					continue;
4604 
4605 				if (br_dev != netdev_master_upper_dev_get(dev) &&
4606 				    !netif_is_bridge_master(dev))
4607 					continue;
4608 				cops = ops;
4609 			}
4610 
4611 			if (idx < s_idx)
4612 				goto cont;
4613 
4614 			if (netif_is_bridge_port(dev)) {
4615 				if (cops && cops->ndo_fdb_dump) {
4616 					err = cops->ndo_fdb_dump(skb, cb,
4617 								br_dev, dev,
4618 								&fidx);
4619 					if (err == -EMSGSIZE)
4620 						goto out;
4621 				}
4622 			}
4623 
4624 			if (dev->netdev_ops->ndo_fdb_dump)
4625 				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4626 								    dev, NULL,
4627 								    &fidx);
4628 			else
4629 				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4630 							&fidx);
4631 			if (err == -EMSGSIZE)
4632 				goto out;
4633 
4634 			cops = NULL;
4635 
4636 			/* reset fdb offset to 0 for rest of the interfaces */
4637 			cb->args[2] = 0;
4638 			fidx = 0;
4639 cont:
4640 			idx++;
4641 		}
4642 	}
4643 
4644 out:
4645 	cb->args[0] = h;
4646 	cb->args[1] = idx;
4647 	cb->args[2] = fidx;
4648 
4649 	return skb->len;
4650 }
4651 
4652 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4653 				struct nlattr **tb, u8 *ndm_flags,
4654 				int *br_idx, int *brport_idx, u8 **addr,
4655 				u16 *vid, struct netlink_ext_ack *extack)
4656 {
4657 	struct ndmsg *ndm;
4658 	int err, i;
4659 
4660 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4661 		NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4662 		return -EINVAL;
4663 	}
4664 
4665 	ndm = nlmsg_data(nlh);
4666 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4667 	    ndm->ndm_type) {
4668 		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4669 		return -EINVAL;
4670 	}
4671 
4672 	if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4673 		NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4674 		return -EINVAL;
4675 	}
4676 
4677 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4678 					    NDA_MAX, nda_policy, extack);
4679 	if (err < 0)
4680 		return err;
4681 
4682 	*ndm_flags = ndm->ndm_flags;
4683 	*brport_idx = ndm->ndm_ifindex;
4684 	for (i = 0; i <= NDA_MAX; ++i) {
4685 		if (!tb[i])
4686 			continue;
4687 
4688 		switch (i) {
4689 		case NDA_MASTER:
4690 			*br_idx = nla_get_u32(tb[i]);
4691 			break;
4692 		case NDA_LLADDR:
4693 			if (nla_len(tb[i]) != ETH_ALEN) {
4694 				NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4695 				return -EINVAL;
4696 			}
4697 			*addr = nla_data(tb[i]);
4698 			break;
4699 		case NDA_VLAN:
4700 			err = fdb_vid_parse(tb[i], vid, extack);
4701 			if (err)
4702 				return err;
4703 			break;
4704 		case NDA_VNI:
4705 			break;
4706 		default:
4707 			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4708 			return -EINVAL;
4709 		}
4710 	}
4711 
4712 	return 0;
4713 }
4714 
4715 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4716 			struct netlink_ext_ack *extack)
4717 {
4718 	struct net_device *dev = NULL, *br_dev = NULL;
4719 	const struct net_device_ops *ops = NULL;
4720 	struct net *net = sock_net(in_skb->sk);
4721 	struct nlattr *tb[NDA_MAX + 1];
4722 	struct sk_buff *skb;
4723 	int brport_idx = 0;
4724 	u8 ndm_flags = 0;
4725 	int br_idx = 0;
4726 	u8 *addr = NULL;
4727 	u16 vid = 0;
4728 	int err;
4729 
4730 	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4731 				   &brport_idx, &addr, &vid, extack);
4732 	if (err < 0)
4733 		return err;
4734 
4735 	if (!addr) {
4736 		NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4737 		return -EINVAL;
4738 	}
4739 
4740 	if (brport_idx) {
4741 		dev = __dev_get_by_index(net, brport_idx);
4742 		if (!dev) {
4743 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4744 			return -ENODEV;
4745 		}
4746 	}
4747 
4748 	if (br_idx) {
4749 		if (dev) {
4750 			NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4751 			return -EINVAL;
4752 		}
4753 
4754 		br_dev = __dev_get_by_index(net, br_idx);
4755 		if (!br_dev) {
4756 			NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4757 			return -EINVAL;
4758 		}
4759 		ops = br_dev->netdev_ops;
4760 	}
4761 
4762 	if (dev) {
4763 		if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4764 			if (!netif_is_bridge_port(dev)) {
4765 				NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4766 				return -EINVAL;
4767 			}
4768 			br_dev = netdev_master_upper_dev_get(dev);
4769 			if (!br_dev) {
4770 				NL_SET_ERR_MSG(extack, "Master of device not found");
4771 				return -EINVAL;
4772 			}
4773 			ops = br_dev->netdev_ops;
4774 		} else {
4775 			if (!(ndm_flags & NTF_SELF)) {
4776 				NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4777 				return -EINVAL;
4778 			}
4779 			ops = dev->netdev_ops;
4780 		}
4781 	}
4782 
4783 	if (!br_dev && !dev) {
4784 		NL_SET_ERR_MSG(extack, "No device specified");
4785 		return -ENODEV;
4786 	}
4787 
4788 	if (!ops || !ops->ndo_fdb_get) {
4789 		NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4790 		return -EOPNOTSUPP;
4791 	}
4792 
4793 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4794 	if (!skb)
4795 		return -ENOBUFS;
4796 
4797 	if (br_dev)
4798 		dev = br_dev;
4799 	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4800 			       NETLINK_CB(in_skb).portid,
4801 			       nlh->nlmsg_seq, extack);
4802 	if (err)
4803 		goto out;
4804 
4805 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4806 out:
4807 	kfree_skb(skb);
4808 	return err;
4809 }
4810 
4811 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4812 			       unsigned int attrnum, unsigned int flag)
4813 {
4814 	if (mask & flag)
4815 		return nla_put_u8(skb, attrnum, !!(flags & flag));
4816 	return 0;
4817 }
4818 
4819 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4820 			    struct net_device *dev, u16 mode,
4821 			    u32 flags, u32 mask, int nlflags,
4822 			    u32 filter_mask,
4823 			    int (*vlan_fill)(struct sk_buff *skb,
4824 					     struct net_device *dev,
4825 					     u32 filter_mask))
4826 {
4827 	struct nlmsghdr *nlh;
4828 	struct ifinfomsg *ifm;
4829 	struct nlattr *br_afspec;
4830 	struct nlattr *protinfo;
4831 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4832 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4833 	int err = 0;
4834 
4835 	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4836 	if (nlh == NULL)
4837 		return -EMSGSIZE;
4838 
4839 	ifm = nlmsg_data(nlh);
4840 	ifm->ifi_family = AF_BRIDGE;
4841 	ifm->__ifi_pad = 0;
4842 	ifm->ifi_type = dev->type;
4843 	ifm->ifi_index = dev->ifindex;
4844 	ifm->ifi_flags = dev_get_flags(dev);
4845 	ifm->ifi_change = 0;
4846 
4847 
4848 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4849 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4850 	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4851 	    (br_dev &&
4852 	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4853 	    (dev->addr_len &&
4854 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4855 	    (dev->ifindex != dev_get_iflink(dev) &&
4856 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4857 		goto nla_put_failure;
4858 
4859 	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4860 	if (!br_afspec)
4861 		goto nla_put_failure;
4862 
4863 	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4864 		nla_nest_cancel(skb, br_afspec);
4865 		goto nla_put_failure;
4866 	}
4867 
4868 	if (mode != BRIDGE_MODE_UNDEF) {
4869 		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4870 			nla_nest_cancel(skb, br_afspec);
4871 			goto nla_put_failure;
4872 		}
4873 	}
4874 	if (vlan_fill) {
4875 		err = vlan_fill(skb, dev, filter_mask);
4876 		if (err) {
4877 			nla_nest_cancel(skb, br_afspec);
4878 			goto nla_put_failure;
4879 		}
4880 	}
4881 	nla_nest_end(skb, br_afspec);
4882 
4883 	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4884 	if (!protinfo)
4885 		goto nla_put_failure;
4886 
4887 	if (brport_nla_put_flag(skb, flags, mask,
4888 				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4889 	    brport_nla_put_flag(skb, flags, mask,
4890 				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4891 	    brport_nla_put_flag(skb, flags, mask,
4892 				IFLA_BRPORT_FAST_LEAVE,
4893 				BR_MULTICAST_FAST_LEAVE) ||
4894 	    brport_nla_put_flag(skb, flags, mask,
4895 				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4896 	    brport_nla_put_flag(skb, flags, mask,
4897 				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4898 	    brport_nla_put_flag(skb, flags, mask,
4899 				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4900 	    brport_nla_put_flag(skb, flags, mask,
4901 				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4902 	    brport_nla_put_flag(skb, flags, mask,
4903 				IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4904 	    brport_nla_put_flag(skb, flags, mask,
4905 				IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4906 	    brport_nla_put_flag(skb, flags, mask,
4907 				IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4908 		nla_nest_cancel(skb, protinfo);
4909 		goto nla_put_failure;
4910 	}
4911 
4912 	nla_nest_end(skb, protinfo);
4913 
4914 	nlmsg_end(skb, nlh);
4915 	return 0;
4916 nla_put_failure:
4917 	nlmsg_cancel(skb, nlh);
4918 	return err ? err : -EMSGSIZE;
4919 }
4920 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
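
/*
 * Driver-side sketch (illustrative names, not a real driver): hardware
 * with an embedded bridge typically implements ndo_bridge_getlink() as a
 * thin wrapper around the default fill above, supplying its current mode.
 *
 *	static int foo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 *				      struct net_device *dev,
 *				      u32 filter_mask, int nlflags)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		u16 mode = priv->vepa ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
 *
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
 *					       0, 0, nlflags, filter_mask,
 *					       NULL);
 *	}
 */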
4921 
4922 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4923 				    bool strict_check, u32 *filter_mask,
4924 				    struct netlink_ext_ack *extack)
4925 {
4926 	struct nlattr *tb[IFLA_MAX+1];
4927 	int err, i;
4928 
4929 	if (strict_check) {
4930 		struct ifinfomsg *ifm;
4931 
4932 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4933 			NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4934 			return -EINVAL;
4935 		}
4936 
4937 		ifm = nlmsg_data(nlh);
4938 		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4939 		    ifm->ifi_change || ifm->ifi_index) {
4940 			NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4941 			return -EINVAL;
4942 		}
4943 
4944 		err = nlmsg_parse_deprecated_strict(nlh,
4945 						    sizeof(struct ifinfomsg),
4946 						    tb, IFLA_MAX, ifla_policy,
4947 						    extack);
4948 	} else {
4949 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4950 					     tb, IFLA_MAX, ifla_policy,
4951 					     extack);
4952 	}
4953 	if (err < 0)
4954 		return err;
4955 
4956 	/* new attributes should only be added with strict checking */
4957 	for (i = 0; i <= IFLA_MAX; ++i) {
4958 		if (!tb[i])
4959 			continue;
4960 
4961 		switch (i) {
4962 		case IFLA_EXT_MASK:
4963 			*filter_mask = nla_get_u32(tb[i]);
4964 			break;
4965 		default:
4966 			if (strict_check) {
4967 				NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4968 				return -EINVAL;
4969 			}
4970 		}
4971 	}
4972 
4973 	return 0;
4974 }
4975 
4976 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4977 {
4978 	const struct nlmsghdr *nlh = cb->nlh;
4979 	struct net *net = sock_net(skb->sk);
4980 	struct net_device *dev;
4981 	int idx = 0;
4982 	u32 portid = NETLINK_CB(cb->skb).portid;
4983 	u32 seq = nlh->nlmsg_seq;
4984 	u32 filter_mask = 0;
4985 	int err;
4986 
4987 	err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4988 				       cb->extack);
4989 	if (err < 0 && cb->strict_check)
4990 		return err;
4991 
4992 	rcu_read_lock();
4993 	for_each_netdev_rcu(net, dev) {
4994 		const struct net_device_ops *ops = dev->netdev_ops;
4995 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4996 
4997 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4998 			if (idx >= cb->args[0]) {
4999 				err = br_dev->netdev_ops->ndo_bridge_getlink(
5000 						skb, portid, seq, dev,
5001 						filter_mask, NLM_F_MULTI);
5002 				if (err < 0 && err != -EOPNOTSUPP) {
5003 					if (likely(skb->len))
5004 						break;
5005 
5006 					goto out_err;
5007 				}
5008 			}
5009 			idx++;
5010 		}
5011 
5012 		if (ops->ndo_bridge_getlink) {
5013 			if (idx >= cb->args[0]) {
5014 				err = ops->ndo_bridge_getlink(skb, portid,
5015 							      seq, dev,
5016 							      filter_mask,
5017 							      NLM_F_MULTI);
5018 				if (err < 0 && err != -EOPNOTSUPP) {
5019 					if (likely(skb->len))
5020 						break;
5021 
5022 					goto out_err;
5023 				}
5024 			}
5025 			idx++;
5026 		}
5027 	}
5028 	err = skb->len;
5029 out_err:
5030 	rcu_read_unlock();
5031 	cb->args[0] = idx;
5032 
5033 	return err;
5034 }
5035 
5036 static inline size_t bridge_nlmsg_size(void)
5037 {
5038 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5039 		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
5040 		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
5041 		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
5042 		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
5043 		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
5044 		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
5045 		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
5046 		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
5047 		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
5048 		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
5049 }
5050 
5051 static int rtnl_bridge_notify(struct net_device *dev)
5052 {
5053 	struct net *net = dev_net(dev);
5054 	struct sk_buff *skb;
5055 	int err = -EOPNOTSUPP;
5056 
5057 	if (!dev->netdev_ops->ndo_bridge_getlink)
5058 		return 0;
5059 
5060 	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5061 	if (!skb) {
5062 		err = -ENOMEM;
5063 		goto errout;
5064 	}
5065 
5066 	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5067 	if (err < 0)
5068 		goto errout;
5069 
5070 	/* Notification info is only filled for bridge ports, not the bridge
5071 	 * device itself. Therefore, a zero notification length is valid and
5072 	 * should not result in an error.
5073 	 */
5074 	if (!skb->len)
5075 		goto errout;
5076 
5077 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5078 	return 0;
5079 errout:
5080 	WARN_ON(err == -EMSGSIZE);
5081 	kfree_skb(skb);
5082 	if (err)
5083 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5084 	return err;
5085 }
5086 
5087 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5088 			       struct netlink_ext_ack *extack)
5089 {
5090 	struct net *net = sock_net(skb->sk);
5091 	struct ifinfomsg *ifm;
5092 	struct net_device *dev;
5093 	struct nlattr *br_spec, *attr = NULL;
5094 	int rem, err = -EOPNOTSUPP;
5095 	u16 flags = 0;
5096 	bool have_flags = false;
5097 
5098 	if (nlmsg_len(nlh) < sizeof(*ifm))
5099 		return -EINVAL;
5100 
5101 	ifm = nlmsg_data(nlh);
5102 	if (ifm->ifi_family != AF_BRIDGE)
5103 		return -EPFNOSUPPORT;
5104 
5105 	dev = __dev_get_by_index(net, ifm->ifi_index);
5106 	if (!dev) {
5107 		NL_SET_ERR_MSG(extack, "unknown ifindex");
5108 		return -ENODEV;
5109 	}
5110 
5111 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5112 	if (br_spec) {
5113 		nla_for_each_nested(attr, br_spec, rem) {
5114 			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5115 				if (nla_len(attr) < sizeof(flags))
5116 					return -EINVAL;
5117 
5118 				have_flags = true;
5119 				flags = nla_get_u16(attr);
5120 				break;
5121 			}
5122 		}
5123 	}
5124 
5125 	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5126 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5127 
5128 		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5129 			err = -EOPNOTSUPP;
5130 			goto out;
5131 		}
5132 
5133 		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5134 							     extack);
5135 		if (err)
5136 			goto out;
5137 
5138 		flags &= ~BRIDGE_FLAGS_MASTER;
5139 	}
5140 
5141 	if ((flags & BRIDGE_FLAGS_SELF)) {
5142 		if (!dev->netdev_ops->ndo_bridge_setlink)
5143 			err = -EOPNOTSUPP;
5144 		else
5145 			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5146 								  flags,
5147 								  extack);
5148 		if (!err) {
5149 			flags &= ~BRIDGE_FLAGS_SELF;
5150 
5151 			/* Generate event to notify upper layer of bridge
5152 			 * change
5153 			 */
5154 			err = rtnl_bridge_notify(dev);
5155 		}
5156 	}
5157 
5158 	if (have_flags)
5159 		memcpy(nla_data(attr), &flags, sizeof(flags));
5160 out:
5161 	return err;
5162 }
5163 
5164 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5165 			       struct netlink_ext_ack *extack)
5166 {
5167 	struct net *net = sock_net(skb->sk);
5168 	struct ifinfomsg *ifm;
5169 	struct net_device *dev;
5170 	struct nlattr *br_spec, *attr = NULL;
5171 	int rem, err = -EOPNOTSUPP;
5172 	u16 flags = 0;
5173 	bool have_flags = false;
5174 
5175 	if (nlmsg_len(nlh) < sizeof(*ifm))
5176 		return -EINVAL;
5177 
5178 	ifm = nlmsg_data(nlh);
5179 	if (ifm->ifi_family != AF_BRIDGE)
5180 		return -EPFNOSUPPORT;
5181 
5182 	dev = __dev_get_by_index(net, ifm->ifi_index);
5183 	if (!dev) {
5184 		NL_SET_ERR_MSG(extack, "unknown ifindex");
5185 		return -ENODEV;
5186 	}
5187 
5188 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5189 	if (br_spec) {
5190 		nla_for_each_nested(attr, br_spec, rem) {
5191 			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5192 				if (nla_len(attr) < sizeof(flags))
5193 					return -EINVAL;
5194 
5195 				have_flags = true;
5196 				flags = nla_get_u16(attr);
5197 				break;
5198 			}
5199 		}
5200 	}
5201 
5202 	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5203 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5204 
5205 		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5206 			err = -EOPNOTSUPP;
5207 			goto out;
5208 		}
5209 
5210 		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5211 		if (err)
5212 			goto out;
5213 
5214 		flags &= ~BRIDGE_FLAGS_MASTER;
5215 	}
5216 
5217 	if ((flags & BRIDGE_FLAGS_SELF)) {
5218 		if (!dev->netdev_ops->ndo_bridge_dellink)
5219 			err = -EOPNOTSUPP;
5220 		else
5221 			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5222 								  flags);
5223 
5224 		if (!err) {
5225 			flags &= ~BRIDGE_FLAGS_SELF;
5226 
5227 			/* Generate event to notify upper layer of bridge
5228 			 * change
5229 			 */
5230 			err = rtnl_bridge_notify(dev);
5231 		}
5232 	}
5233 
5234 	if (have_flags)
5235 		memcpy(nla_data(attr), &flags, sizeof(flags));
5236 out:
5237 	return err;
5238 }
5239 
5240 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5241 {
5242 	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5243 	       (!idxattr || idxattr == attrid);
5244 }
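
/*
 * Example: with IFLA_STATS_FILTER_BIT(ATTR) defined in the uapi as
 * (1 << (ATTR - 1)), a filter_mask of
 * IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64) selects only the basic
 * 64-bit stats.  idxattr == 0 means "not resuming a dump"; when a dump
 * resumes, a non-zero idxattr limits filling to the one attribute that
 * was in progress.
 */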
5245 
5246 static bool
5247 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5248 {
5249 	return dev->netdev_ops &&
5250 	       dev->netdev_ops->ndo_has_offload_stats &&
5251 	       dev->netdev_ops->ndo_get_offload_stats &&
5252 	       dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5253 }
5254 
5255 static unsigned int
5256 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5257 {
5258 	return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5259 	       sizeof(struct rtnl_link_stats64) : 0;
5260 }
5261 
5262 static int
5263 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5264 			     struct sk_buff *skb)
5265 {
5266 	unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5267 	struct nlattr *attr = NULL;
5268 	void *attr_data;
5269 	int err;
5270 
5271 	if (!size)
5272 		return -ENODATA;
5273 
5274 	attr = nla_reserve_64bit(skb, attr_id, size,
5275 				 IFLA_OFFLOAD_XSTATS_UNSPEC);
5276 	if (!attr)
5277 		return -EMSGSIZE;
5278 
5279 	attr_data = nla_data(attr);
5280 	memset(attr_data, 0, size);
5281 
5282 	err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5283 	if (err)
5284 		return err;
5285 
5286 	return 0;
5287 }
5288 
5289 static unsigned int
5290 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5291 				   enum netdev_offload_xstats_type type)
5292 {
5293 	bool enabled = netdev_offload_xstats_enabled(dev, type);
5294 
5295 	return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5296 }
5297 
5298 struct rtnl_offload_xstats_request_used {
5299 	bool request;
5300 	bool used;
5301 };
5302 
5303 static int
5304 rtnl_offload_xstats_get_stats(struct net_device *dev,
5305 			      enum netdev_offload_xstats_type type,
5306 			      struct rtnl_offload_xstats_request_used *ru,
5307 			      struct rtnl_hw_stats64 *stats,
5308 			      struct netlink_ext_ack *extack)
5309 {
5310 	bool request;
5311 	bool used;
5312 	int err;
5313 
5314 	request = netdev_offload_xstats_enabled(dev, type);
5315 	if (!request) {
5316 		used = false;
5317 		goto out;
5318 	}
5319 
5320 	err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5321 	if (err)
5322 		return err;
5323 
5324 out:
5325 	if (ru) {
5326 		ru->request = request;
5327 		ru->used = used;
5328 	}
5329 	return 0;
5330 }
5331 
5332 static int
5333 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5334 				       struct rtnl_offload_xstats_request_used *ru)
5335 {
5336 	struct nlattr *nest;
5337 
5338 	nest = nla_nest_start(skb, attr_id);
5339 	if (!nest)
5340 		return -EMSGSIZE;
5341 
5342 	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5343 		goto nla_put_failure;
5344 
5345 	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5346 		goto nla_put_failure;
5347 
5348 	nla_nest_end(skb, nest);
5349 	return 0;
5350 
5351 nla_put_failure:
5352 	nla_nest_cancel(skb, nest);
5353 	return -EMSGSIZE;
5354 }
5355 
5356 static int
5357 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5358 				   struct netlink_ext_ack *extack)
5359 {
5360 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5361 	struct rtnl_offload_xstats_request_used ru_l3;
5362 	struct nlattr *nest;
5363 	int err;
5364 
5365 	err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5366 	if (err)
5367 		return err;
5368 
5369 	nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5370 	if (!nest)
5371 		return -EMSGSIZE;
5372 
5373 	if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5374 						   IFLA_OFFLOAD_XSTATS_L3_STATS,
5375 						   &ru_l3))
5376 		goto nla_put_failure;
5377 
5378 	nla_nest_end(skb, nest);
5379 	return 0;
5380 
5381 nla_put_failure:
5382 	nla_nest_cancel(skb, nest);
5383 	return -EMSGSIZE;
5384 }
5385 
5386 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5387 				    int *prividx, u32 off_filter_mask,
5388 				    struct netlink_ext_ack *extack)
5389 {
5390 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5391 	int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5392 	int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5393 	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5394 	bool have_data = false;
5395 	int err;
5396 
5397 	if (*prividx <= attr_id_cpu_hit &&
5398 	    (off_filter_mask &
5399 	     IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5400 		err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5401 		if (!err) {
5402 			have_data = true;
5403 		} else if (err != -ENODATA) {
5404 			*prividx = attr_id_cpu_hit;
5405 			return err;
5406 		}
5407 	}
5408 
5409 	if (*prividx <= attr_id_hw_s_info &&
5410 	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5411 		*prividx = attr_id_hw_s_info;
5412 
5413 		err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5414 		if (err)
5415 			return err;
5416 
5417 		have_data = true;
5418 		*prividx = 0;
5419 	}
5420 
5421 	if (*prividx <= attr_id_l3_stats &&
5422 	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5423 		unsigned int size_l3;
5424 		struct nlattr *attr;
5425 
5426 		*prividx = attr_id_l3_stats;
5427 
5428 		size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5429 		if (!size_l3)
5430 			goto skip_l3_stats;
5431 		attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5432 					 IFLA_OFFLOAD_XSTATS_UNSPEC);
5433 		if (!attr)
5434 			return -EMSGSIZE;
5435 
5436 		err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5437 						    nla_data(attr), extack);
5438 		if (err)
5439 			return err;
5440 
5441 		have_data = true;
5442 skip_l3_stats:
5443 		*prividx = 0;
5444 	}
5445 
5446 	if (!have_data)
5447 		return -ENODATA;
5448 
5449 	*prividx = 0;
5450 	return 0;
5451 }
5452 
5453 static unsigned int
5454 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5455 					   enum netdev_offload_xstats_type type)
5456 {
5457 	bool enabled = netdev_offload_xstats_enabled(dev, type);
5458 
5459 	return nla_total_size(0) +
5460 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5461 		nla_total_size(sizeof(u8)) +
5462 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5463 		(enabled ? nla_total_size(sizeof(u8)) : 0) +
5464 		0;
5465 }
5466 
5467 static unsigned int
5468 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5469 {
5470 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5471 
5472 	return nla_total_size(0) +
5473 		/* IFLA_OFFLOAD_XSTATS_L3_STATS */
5474 		rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5475 		0;
5476 }
5477 
5478 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5479 					u32 off_filter_mask)
5480 {
5481 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5482 	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5483 	int nla_size = 0;
5484 	int size;
5485 
5486 	if (off_filter_mask &
5487 	    IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5488 		size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5489 		nla_size += nla_total_size_64bit(size);
5490 	}
5491 
5492 	if (off_filter_mask &
5493 	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5494 		nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5495 
5496 	if (off_filter_mask &
5497 	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5498 		size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5499 		nla_size += nla_total_size_64bit(size);
5500 	}
5501 
5502 	if (nla_size != 0)
5503 		nla_size += nla_total_size(0);
5504 
5505 	return nla_size;
5506 }
5507 
5508 struct rtnl_stats_dump_filters {
5509 	/* mask[0] filters outer attributes. Then individual nests have their
5510 	 * filtering mask at the index of the nested attribute.
5511 	 */
5512 	u32 mask[IFLA_STATS_MAX + 1];
5513 };
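
/*
 * Example filter setup (a sketch): select the offload xstats nest via the
 * outer mask, then narrow that nest to L3 stats with its per-nest mask.
 *
 *	struct rtnl_stats_dump_filters filters = {};
 *
 *	filters.mask[0] =
 *		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
 *	filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS);
 */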
5514 
5515 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5516 			       int type, u32 pid, u32 seq, u32 change,
5517 			       unsigned int flags,
5518 			       const struct rtnl_stats_dump_filters *filters,
5519 			       int *idxattr, int *prividx,
5520 			       struct netlink_ext_ack *extack)
5521 {
5522 	unsigned int filter_mask = filters->mask[0];
5523 	struct if_stats_msg *ifsm;
5524 	struct nlmsghdr *nlh;
5525 	struct nlattr *attr;
5526 	int s_prividx = *prividx;
5527 	int err;
5528 
5529 	ASSERT_RTNL();
5530 
5531 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5532 	if (!nlh)
5533 		return -EMSGSIZE;
5534 
5535 	ifsm = nlmsg_data(nlh);
5536 	ifsm->family = PF_UNSPEC;
5537 	ifsm->pad1 = 0;
5538 	ifsm->pad2 = 0;
5539 	ifsm->ifindex = dev->ifindex;
5540 	ifsm->filter_mask = filter_mask;
5541 
5542 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5543 		struct rtnl_link_stats64 *sp;
5544 
5545 		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5546 					 sizeof(struct rtnl_link_stats64),
5547 					 IFLA_STATS_UNSPEC);
5548 		if (!attr) {
5549 			err = -EMSGSIZE;
5550 			goto nla_put_failure;
5551 		}
5552 
5553 		sp = nla_data(attr);
5554 		dev_get_stats(dev, sp);
5555 	}
5556 
5557 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5558 		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5559 
5560 		if (ops && ops->fill_linkxstats) {
5561 			*idxattr = IFLA_STATS_LINK_XSTATS;
5562 			attr = nla_nest_start_noflag(skb,
5563 						     IFLA_STATS_LINK_XSTATS);
5564 			if (!attr) {
5565 				err = -EMSGSIZE;
5566 				goto nla_put_failure;
5567 			}
5568 
5569 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5570 			nla_nest_end(skb, attr);
5571 			if (err)
5572 				goto nla_put_failure;
5573 			*idxattr = 0;
5574 		}
5575 	}
5576 
5577 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5578 			     *idxattr)) {
5579 		const struct rtnl_link_ops *ops = NULL;
5580 		const struct net_device *master;
5581 
5582 		master = netdev_master_upper_dev_get(dev);
5583 		if (master)
5584 			ops = master->rtnl_link_ops;
5585 		if (ops && ops->fill_linkxstats) {
5586 			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5587 			attr = nla_nest_start_noflag(skb,
5588 						     IFLA_STATS_LINK_XSTATS_SLAVE);
5589 			if (!attr) {
5590 				err = -EMSGSIZE;
5591 				goto nla_put_failure;
5592 			}
5593 
5594 			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5595 			nla_nest_end(skb, attr);
5596 			if (err)
5597 				goto nla_put_failure;
5598 			*idxattr = 0;
5599 		}
5600 	}
5601 
5602 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5603 			     *idxattr)) {
5604 		u32 off_filter_mask;
5605 
5606 		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5607 		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5608 		attr = nla_nest_start_noflag(skb,
5609 					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
5610 		if (!attr) {
5611 			err = -EMSGSIZE;
5612 			goto nla_put_failure;
5613 		}
5614 
5615 		err = rtnl_offload_xstats_fill(skb, dev, prividx,
5616 					       off_filter_mask, extack);
5617 		if (err == -ENODATA)
5618 			nla_nest_cancel(skb, attr);
5619 		else
5620 			nla_nest_end(skb, attr);
5621 
5622 		if (err && err != -ENODATA)
5623 			goto nla_put_failure;
5624 		*idxattr = 0;
5625 	}
5626 
5627 	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5628 		struct rtnl_af_ops *af_ops;
5629 
5630 		*idxattr = IFLA_STATS_AF_SPEC;
5631 		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5632 		if (!attr) {
5633 			err = -EMSGSIZE;
5634 			goto nla_put_failure;
5635 		}
5636 
5637 		rcu_read_lock();
5638 		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5639 			if (af_ops->fill_stats_af) {
5640 				struct nlattr *af;
5641 
5642 				af = nla_nest_start_noflag(skb,
5643 							   af_ops->family);
5644 				if (!af) {
5645 					rcu_read_unlock();
5646 					err = -EMSGSIZE;
5647 					goto nla_put_failure;
5648 				}
5649 				err = af_ops->fill_stats_af(skb, dev);
5650 
5651 				if (err == -ENODATA) {
5652 					nla_nest_cancel(skb, af);
5653 				} else if (err < 0) {
5654 					rcu_read_unlock();
5655 					goto nla_put_failure;
5656 				}
5657 
5658 				nla_nest_end(skb, af);
5659 			}
5660 		}
5661 		rcu_read_unlock();
5662 
5663 		nla_nest_end(skb, attr);
5664 
5665 		*idxattr = 0;
5666 	}
5667 
5668 	nlmsg_end(skb, nlh);
5669 
5670 	return 0;
5671 
5672 nla_put_failure:
5673 	/* not a multi message or no progress means a real error */
5674 	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5675 		nlmsg_cancel(skb, nlh);
5676 	else
5677 		nlmsg_end(skb, nlh);
5678 
5679 	return err;
5680 }
5681 
5682 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5683 				  const struct rtnl_stats_dump_filters *filters)
5684 {
5685 	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5686 	unsigned int filter_mask = filters->mask[0];
5687 
5688 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5689 		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5690 
5691 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5692 		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5693 		int attr = IFLA_STATS_LINK_XSTATS;
5694 
5695 		if (ops && ops->get_linkxstats_size) {
5696 			size += nla_total_size(ops->get_linkxstats_size(dev,
5697 									attr));
5698 			/* for IFLA_STATS_LINK_XSTATS */
5699 			size += nla_total_size(0);
5700 		}
5701 	}
5702 
5703 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5704 		struct net_device *_dev = (struct net_device *)dev;
5705 		const struct rtnl_link_ops *ops = NULL;
5706 		const struct net_device *master;
5707 
5708 		/* netdev_master_upper_dev_get can't take const */
5709 		master = netdev_master_upper_dev_get(_dev);
5710 		if (master)
5711 			ops = master->rtnl_link_ops;
5712 		if (ops && ops->get_linkxstats_size) {
5713 			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5714 
5715 			size += nla_total_size(ops->get_linkxstats_size(dev,
5716 									attr));
5717 			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
5718 			size += nla_total_size(0);
5719 		}
5720 	}
5721 
5722 	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5723 		u32 off_filter_mask;
5724 
5725 		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5726 		size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5727 	}
5728 
5729 	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5730 		struct rtnl_af_ops *af_ops;
5731 
5732 		/* for IFLA_STATS_AF_SPEC */
5733 		size += nla_total_size(0);
5734 
5735 		rcu_read_lock();
5736 		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5737 			if (af_ops->get_stats_af_size) {
5738 				size += nla_total_size(
5739 					af_ops->get_stats_af_size(dev));
5740 
5741 				/* for AF_* */
5742 				size += nla_total_size(0);
5743 			}
5744 		}
5745 		rcu_read_unlock();
5746 	}
5747 
5748 	return size;
5749 }
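
/* Each nla_total_size(0) above pays for a nest header only.  The
 * estimate must cover everything rtnl_fill_statsinfo() can emit for
 * the same filters: an -EMSGSIZE on an skb sized from here is treated
 * as a bug (see the WARN_ON() in rtnl_stats_get()).
 */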
5750 
5751 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5752 
5753 static const struct nla_policy
5754 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5755 	[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5756 		    NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5757 };
5758 
5759 static const struct nla_policy
5760 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5761 	[IFLA_STATS_GET_FILTERS] =
5762 		    NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5763 };
5764 
5765 static const struct nla_policy
5766 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5767 	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5768 };
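
/* NLA_POLICY_MASK() rejects requests that set filter bits outside
 * RTNL_STATS_OFFLOAD_XSTATS_VALID, and NLA_POLICY_MAX(NLA_U8, 1)
 * constrains the L3-stats toggle to a boolean 0 or 1.
 */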
5769 
5770 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5771 					struct rtnl_stats_dump_filters *filters,
5772 					struct netlink_ext_ack *extack)
5773 {
5774 	struct nlattr *tb[IFLA_STATS_MAX + 1];
5775 	int err;
5776 	int at;
5777 
5778 	err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5779 			       rtnl_stats_get_policy_filters, extack);
5780 	if (err < 0)
5781 		return err;
5782 
5783 	for (at = 1; at <= IFLA_STATS_MAX; at++) {
5784 		if (tb[at]) {
5785 			if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5786 				NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5787 				return -EINVAL;
5788 			}
5789 			filters->mask[at] = nla_get_u32(tb[at]);
5790 		}
5791 	}
5792 
5793 	return 0;
5794 }
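
/* mask[0] mirrors the top-level filter_mask from the header, while
 * mask[at] narrows attribute 'at' further.  Since the uapi helper
 * IFLA_STATS_FILTER_BIT(ATTR) is (1 << (ATTR - 1)), a sub-filter is
 * only accepted when its parent bit is set in filter_mask.
 */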
5795 
5796 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5797 				u32 filter_mask,
5798 				struct rtnl_stats_dump_filters *filters,
5799 				struct netlink_ext_ack *extack)
5800 {
5801 	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5802 	int err;
5803 	int i;
5804 
5805 	filters->mask[0] = filter_mask;
5806 	for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5807 		filters->mask[i] = -1U;
5808 
5809 	err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5810 			  IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5811 	if (err < 0)
5812 		return err;
5813 
5814 	if (tb[IFLA_STATS_GET_FILTERS]) {
5815 		err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5816 						   filters, extack);
5817 		if (err)
5818 			return err;
5819 	}
5820 
5821 	return 0;
5822 }
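
/* Sub-filters default to -1U ("everything"), so a request without
 * IFLA_STATS_GET_FILTERS selects every group named by the top-level
 * filter_mask with no further narrowing.
 */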
5823 
5824 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5825 				bool is_dump, struct netlink_ext_ack *extack)
5826 {
5827 	struct if_stats_msg *ifsm;
5828 
5829 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5830 		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5831 		return -EINVAL;
5832 	}
5833 
5834 	if (!strict_check)
5835 		return 0;
5836 
5837 	ifsm = nlmsg_data(nlh);
5838 
5839 	/* Only requests using strict checks can pass data to influence
5840 	 * the dump; the legacy exception is filter_mask.
5841 	 */
5842 	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5843 		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5844 		return -EINVAL;
5845 	}
5846 	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5847 		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5848 		return -EINVAL;
5849 	}
5850 
5851 	return 0;
5852 }
5853 
5854 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5855 			  struct netlink_ext_ack *extack)
5856 {
5857 	struct rtnl_stats_dump_filters filters;
5858 	struct net *net = sock_net(skb->sk);
5859 	struct net_device *dev = NULL;
5860 	int idxattr = 0, prividx = 0;
5861 	struct if_stats_msg *ifsm;
5862 	struct sk_buff *nskb;
5863 	int err;
5864 
5865 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5866 				   false, extack);
5867 	if (err)
5868 		return err;
5869 
5870 	ifsm = nlmsg_data(nlh);
5871 	if (ifsm->ifindex > 0)
5872 		dev = __dev_get_by_index(net, ifsm->ifindex);
5873 	else
5874 		return -EINVAL;
5875 
5876 	if (!dev)
5877 		return -ENODEV;
5878 
5879 	if (!ifsm->filter_mask) {
5880 		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5881 		return -EINVAL;
5882 	}
5883 
5884 	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5885 	if (err)
5886 		return err;
5887 
5888 	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5889 	if (!nskb)
5890 		return -ENOBUFS;
5891 
5892 	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5893 				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5894 				  0, &filters, &idxattr, &prividx, extack);
5895 	if (err < 0) {
5896 		/* -EMSGSIZE implies a bug in if_nlmsg_stats_size() */
5897 		WARN_ON(err == -EMSGSIZE);
5898 		kfree_skb(nskb);
5899 	} else {
5900 		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5901 	}
5902 
5903 	return err;
5904 }
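
/* Illustration (userspace sketch, assuming only the standard uapi in
 * <linux/if_link.h>; not part of this file): the smallest useful
 * RTM_GETSTATS request asks for the 64-bit link stats of one device:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct if_stats_msg ifsm;
 *	} req = {
 *		.nlh.nlmsg_len    = sizeof(req),
 *		.nlh.nlmsg_type   = RTM_GETSTATS,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST,
 *		.ifsm.family      = AF_UNSPEC,
 *		.ifsm.ifindex     = 2,
 *		.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 *
 * Sent over a NETLINK_ROUTE socket, it is answered with one
 * RTM_NEWSTATS message built by rtnl_fill_statsinfo().
 */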
5905 
5906 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5907 {
5908 	struct netlink_ext_ack *extack = cb->extack;
5909 	int h, s_h, err, s_idx, s_idxattr, s_prividx;
5910 	struct rtnl_stats_dump_filters filters;
5911 	struct net *net = sock_net(skb->sk);
5912 	unsigned int flags = NLM_F_MULTI;
5913 	struct if_stats_msg *ifsm;
5914 	struct hlist_head *head;
5915 	struct net_device *dev;
5916 	int idx = 0;
5917 
5918 	s_h = cb->args[0];
5919 	s_idx = cb->args[1];
5920 	s_idxattr = cb->args[2];
5921 	s_prividx = cb->args[3];
5922 
5923 	cb->seq = net->dev_base_seq;
5924 
5925 	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5926 	if (err)
5927 		return err;
5928 
5929 	ifsm = nlmsg_data(cb->nlh);
5930 	if (!ifsm->filter_mask) {
5931 		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5932 		return -EINVAL;
5933 	}
5934 
5935 	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5936 				   extack);
5937 	if (err)
5938 		return err;
5939 
5940 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5941 		idx = 0;
5942 		head = &net->dev_index_head[h];
5943 		hlist_for_each_entry(dev, head, index_hlist) {
5944 			if (idx < s_idx)
5945 				goto cont;
5946 			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5947 						  NETLINK_CB(cb->skb).portid,
5948 						  cb->nlh->nlmsg_seq, 0,
5949 						  flags, &filters,
5950 						  &s_idxattr, &s_prividx,
5951 						  extack);
5952 			/* If we ran out of room on the very first message,
5953 			 * we're in trouble: the dump cannot make progress.
5954 			 */
5955 			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5956 
5957 			if (err < 0)
5958 				goto out;
5959 			s_prividx = 0;
5960 			s_idxattr = 0;
5961 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5962 cont:
5963 			idx++;
5964 		}
5965 	}
5966 out:
5967 	cb->args[3] = s_prividx;
5968 	cb->args[2] = s_idxattr;
5969 	cb->args[1] = idx;
5970 	cb->args[0] = h;
5971 
5972 	return skb->len;
5973 }
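
/* The four cb->args slots saved above let an interrupted dump resume
 * where it stopped: hash bucket (h), device position in the bucket
 * (idx), top-level stats attribute (s_idxattr) and the driver-private
 * offset inside that attribute (s_prividx).
 */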
5974 
5975 void rtnl_offload_xstats_notify(struct net_device *dev)
5976 {
5977 	struct rtnl_stats_dump_filters response_filters = {};
5978 	struct net *net = dev_net(dev);
5979 	int idxattr = 0, prividx = 0;
5980 	struct sk_buff *skb;
5981 	int err = -ENOBUFS;
5982 
5983 	ASSERT_RTNL();
5984 
5985 	response_filters.mask[0] |=
5986 		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5987 	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5988 		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5989 
5990 	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
5991 			GFP_KERNEL);
5992 	if (!skb)
5993 		goto errout;
5994 
5995 	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
5996 				  &response_filters, &idxattr, &prividx, NULL);
5997 	if (err < 0) {
5998 		kfree_skb(skb);
5999 		goto errout;
6000 	}
6001 
6002 	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6003 	return;
6004 
6005 errout:
6006 	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6007 }
6008 EXPORT_SYMBOL(rtnl_offload_xstats_notify);
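
/* This emits an unsolicited RTM_NEWSTATS carrying
 * IFLA_OFFLOAD_XSTATS_HW_S_INFO to RTNLGRP_STATS listeners, so
 * userspace can track offload-stats state changes without polling.
 */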
6009 
6010 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6011 			  struct netlink_ext_ack *extack)
6012 {
6013 	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
6014 	struct rtnl_stats_dump_filters response_filters = {};
6015 	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6016 	struct net *net = sock_net(skb->sk);
6017 	struct net_device *dev = NULL;
6018 	struct if_stats_msg *ifsm;
6019 	bool notify = false;
6020 	int err;
6021 
6022 	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6023 				   false, extack);
6024 	if (err)
6025 		return err;
6026 
6027 	ifsm = nlmsg_data(nlh);
6028 	if (ifsm->family != AF_UNSPEC) {
6029 		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6030 		return -EINVAL;
6031 	}
6032 
6033 	if (ifsm->ifindex > 0)
6034 		dev = __dev_get_by_index(net, ifsm->ifindex);
6035 	else
6036 		return -EINVAL;
6037 
6038 	if (!dev)
6039 		return -ENODEV;
6040 
6041 	if (ifsm->filter_mask) {
6042 		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6043 		return -EINVAL;
6044 	}
6045 
6046 	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6047 			  ifla_stats_set_policy, extack);
6048 	if (err < 0)
6049 		return err;
6050 
6051 	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6052 		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6053 
6054 		if (req)
6055 			err = netdev_offload_xstats_enable(dev, t_l3, extack);
6056 		else
6057 			err = netdev_offload_xstats_disable(dev, t_l3);
6058 
6059 		if (!err)
6060 			notify = true;
6061 		else if (err != -EALREADY)
6062 			return err;
6063 
6064 		response_filters.mask[0] |=
6065 			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6066 		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6067 			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6068 	}
6069 
6070 	if (notify)
6071 		rtnl_offload_xstats_notify(dev);
6072 
6073 	return 0;
6074 }
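
/* Illustration (userspace view, attribute names from the uapi):
 * enabling hardware L3 stats collection amounts to an RTM_SETSTATS
 * request with filter_mask 0 and a single attribute
 *
 *	IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS (u8) = 1
 *
 * A successful state change triggers rtnl_offload_xstats_notify();
 * requesting the state the device is already in (-EALREADY) succeeds
 * silently, with no notification.
 */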
6075 
6076 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6077 				   struct netlink_ext_ack *extack)
6078 {
6079 	struct br_port_msg *bpm;
6080 
6081 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6082 		NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6083 		return -EINVAL;
6084 	}
6085 
6086 	bpm = nlmsg_data(nlh);
6087 	if (bpm->ifindex) {
6088 		NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6089 		return -EINVAL;
6090 	}
6091 	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6092 		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6093 		return -EINVAL;
6094 	}
6095 
6096 	return 0;
6097 }
6098 
6099 struct rtnl_mdb_dump_ctx {
6100 	long idx;
6101 };
6102 
6103 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
6104 {
6105 	struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
6106 	struct net *net = sock_net(skb->sk);
6107 	struct net_device *dev;
6108 	int idx, s_idx;
6109 	int err;
6110 
6111 	NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);
6112 
6113 	if (cb->strict_check) {
6114 		err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
6115 		if (err)
6116 			return err;
6117 	}
6118 
6119 	s_idx = ctx->idx;
6120 	idx = 0;
6121 
6122 	for_each_netdev(net, dev) {
6123 		if (idx < s_idx)
6124 			goto skip;
6125 		if (!dev->netdev_ops->ndo_mdb_dump)
6126 			goto skip;
6127 
6128 		err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6129 		if (err == -EMSGSIZE)
6130 			goto out;
6131 		/* Moving on to the next device, reset markers and sequence
6132 		 * counters since they are all maintained per-device.
6133 		 */
6134 		memset(cb->ctx, 0, sizeof(cb->ctx));
6135 		cb->prev_seq = 0;
6136 		cb->seq = 0;
6137 skip:
6138 		idx++;
6139 	}
6140 
6141 out:
6142 	ctx->idx = idx;
6143 	return skb->len;
6144 }
6145 
6146 static int rtnl_validate_mdb_entry(const struct nlattr *attr,
6147 				   struct netlink_ext_ack *extack)
6148 {
6149 	struct br_mdb_entry *entry = nla_data(attr);
6150 
6151 	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6152 		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6153 		return -EINVAL;
6154 	}
6155 
6156 	if (entry->ifindex == 0) {
6157 		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
6158 		return -EINVAL;
6159 	}
6160 
6161 	if (entry->addr.proto == htons(ETH_P_IP)) {
6162 		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
6163 		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
6164 			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
6165 			return -EINVAL;
6166 		}
6167 		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
6168 			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
6169 			return -EINVAL;
6170 		}
6171 #if IS_ENABLED(CONFIG_IPV6)
6172 	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
6173 		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
6174 			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
6175 			return -EINVAL;
6176 		}
6177 #endif
6178 	} else if (entry->addr.proto == 0) {
6179 		/* L2 mdb */
6180 		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
6181 			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
6182 			return -EINVAL;
6183 		}
6184 	} else {
6185 		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6186 		return -EINVAL;
6187 	}
6188 
6189 	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6190 		NL_SET_ERR_MSG(extack, "Unknown entry state");
6191 		return -EINVAL;
6192 	}
6193 	if (entry->vid >= VLAN_VID_MASK) {
6194 		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6195 		return -EINVAL;
6196 	}
6197 
6198 	return 0;
6199 }
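
/* Summarising the checks above: a valid entry is an IPv4 group that is
 * multicast but not 224.0.0.x, or 0.0.0.0; an IPv6 group other than
 * the link-local all-nodes address; or, with proto == 0, a multicast
 * MAC address.  State must be permanent or temporary, and the VLAN id
 * below 4095.
 */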
6200 
6201 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
6202 	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
6203 	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6204 						  rtnl_validate_mdb_entry,
6205 						  sizeof(struct br_mdb_entry)),
6206 	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6207 };
6208 
6209 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6210 			struct netlink_ext_ack *extack)
6211 {
6212 	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6213 	struct net *net = sock_net(skb->sk);
6214 	struct br_port_msg *bpm;
6215 	struct net_device *dev;
6216 	int err;
6217 
6218 	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6219 				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6220 	if (err)
6221 		return err;
6222 
6223 	bpm = nlmsg_data(nlh);
6224 	if (!bpm->ifindex) {
6225 		NL_SET_ERR_MSG(extack, "Invalid ifindex");
6226 		return -EINVAL;
6227 	}
6228 
6229 	dev = __dev_get_by_index(net, bpm->ifindex);
6230 	if (!dev) {
6231 		NL_SET_ERR_MSG(extack, "Device doesn't exist");
6232 		return -ENODEV;
6233 	}
6234 
6235 	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6236 		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6237 		return -EINVAL;
6238 	}
6239 
6240 	if (!dev->netdev_ops->ndo_mdb_add) {
6241 		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6242 		return -EOPNOTSUPP;
6243 	}
6244 
6245 	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6246 }
6247 
6248 static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
6249 			struct netlink_ext_ack *extack)
6250 {
6251 	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6252 	struct net *net = sock_net(skb->sk);
6253 	struct br_port_msg *bpm;
6254 	struct net_device *dev;
6255 	int err;
6256 
6257 	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6258 				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6259 	if (err)
6260 		return err;
6261 
6262 	bpm = nlmsg_data(nlh);
6263 	if (!bpm->ifindex) {
6264 		NL_SET_ERR_MSG(extack, "Invalid ifindex");
6265 		return -EINVAL;
6266 	}
6267 
6268 	dev = __dev_get_by_index(net, bpm->ifindex);
6269 	if (!dev) {
6270 		NL_SET_ERR_MSG(extack, "Device doesn't exist");
6271 		return -ENODEV;
6272 	}
6273 
6274 	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6275 		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6276 		return -EINVAL;
6277 	}
6278 
6279 	if (!dev->netdev_ops->ndo_mdb_del) {
6280 		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6281 		return -EOPNOTSUPP;
6282 	}
6283 
6284 	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6285 }
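
/* RTM_NEWMDB and RTM_DELMDB are deliberately thin: after validation
 * they defer entirely to the ndo_mdb_add()/ndo_mdb_del() ops of
 * drivers that keep a multicast database, such as the bridge.
 */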
6286 
6287 /* Process one rtnetlink message. */
6288 
6289 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6290 			     struct netlink_ext_ack *extack)
6291 {
6292 	struct net *net = sock_net(skb->sk);
6293 	struct rtnl_link *link;
6294 	enum rtnl_kinds kind;
6295 	struct module *owner;
6296 	int err = -EOPNOTSUPP;
6297 	rtnl_doit_func doit;
6298 	unsigned int flags;
6299 	int family;
6300 	int type;
6301 
6302 	type = nlh->nlmsg_type;
6303 	if (type > RTM_MAX)
6304 		return -EOPNOTSUPP;
6305 
6306 	type -= RTM_BASE;
6307 
6308 	/* All messages must carry at least 1 byte of payload (struct rtgenmsg) */
6309 	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6310 		return 0;
6311 
6312 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6313 	kind = rtnl_msgtype_kind(type);
6314 
6315 	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6316 		return -EPERM;
6317 
6318 	rcu_read_lock();
6319 	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6320 		struct sock *rtnl;
6321 		rtnl_dumpit_func dumpit;
6322 		u32 min_dump_alloc = 0;
6323 
6324 		link = rtnl_get_link(family, type);
6325 		if (!link || !link->dumpit) {
6326 			family = PF_UNSPEC;
6327 			link = rtnl_get_link(family, type);
6328 			if (!link || !link->dumpit)
6329 				goto err_unlock;
6330 		}
6331 		owner = link->owner;
6332 		dumpit = link->dumpit;
6333 
6334 		if (type == RTM_GETLINK - RTM_BASE)
6335 			min_dump_alloc = rtnl_calcit(skb, nlh);
6336 
6337 		err = 0;
6338 		/* Must take the module reference before rcu_read_unlock() */
6339 		if (!try_module_get(owner))
6340 			err = -EPROTONOSUPPORT;
6341 
6342 		rcu_read_unlock();
6343 
6344 		rtnl = net->rtnl;
6345 		if (err == 0) {
6346 			struct netlink_dump_control c = {
6347 				.dump		= dumpit,
6348 				.min_dump_alloc	= min_dump_alloc,
6349 				.module		= owner,
6350 			};
6351 			err = netlink_dump_start(rtnl, skb, nlh, &c);
6352 			/* netlink_dump_start() keeps its own reference on the
6353 			 * module while the dump is still in progress.
6354 			 */
6355 			module_put(owner);
6356 		}
6357 		return err;
6358 	}
6359 
6360 	link = rtnl_get_link(family, type);
6361 	if (!link || !link->doit) {
6362 		family = PF_UNSPEC;
6363 		link = rtnl_get_link(family, type);
6364 		if (!link || !link->doit)
6365 			goto out_unlock;
6366 	}
6367 
6368 	owner = link->owner;
6369 	if (!try_module_get(owner)) {
6370 		err = -EPROTONOSUPPORT;
6371 		goto out_unlock;
6372 	}
6373 
6374 	flags = link->flags;
6375 	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6376 	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6377 		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6378 		module_put(owner);
6379 		goto err_unlock;
6380 	}
6381 
6382 	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6383 		doit = link->doit;
6384 		rcu_read_unlock();
6385 		if (doit)
6386 			err = doit(skb, nlh, extack);
6387 		module_put(owner);
6388 		return err;
6389 	}
6390 	rcu_read_unlock();
6391 
6392 	rtnl_lock();
6393 	link = rtnl_get_link(family, type);
6394 	if (link && link->doit)
6395 		err = link->doit(skb, nlh, extack);
6396 	rtnl_unlock();
6397 
6398 	module_put(owner);
6399 
6400 	return err;
6401 
6402 out_unlock:
6403 	rcu_read_unlock();
6404 	return err;
6405 
6406 err_unlock:
6407 	rcu_read_unlock();
6408 	return -EOPNOTSUPP;
6409 }
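
/* Dispatch summary for the function above: GET requests with
 * NLM_F_DUMP go through netlink_dump_start(); everything else runs its
 * doit handler either without the RTNL mutex (RTNL_FLAG_DOIT_UNLOCKED)
 * or, by default, under rtnl_lock().
 */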
6410 
6411 static void rtnetlink_rcv(struct sk_buff *skb)
6412 {
6413 	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6414 }
6415 
6416 static int rtnetlink_bind(struct net *net, int group)
6417 {
6418 	switch (group) {
6419 	case RTNLGRP_IPV4_MROUTE_R:
6420 	case RTNLGRP_IPV6_MROUTE_R:
6421 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6422 			return -EPERM;
6423 		break;
6424 	}
6425 	return 0;
6426 }
6427 
6428 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6429 {
6430 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6431 
6432 	switch (event) {
6433 	case NETDEV_REBOOT:
6434 	case NETDEV_CHANGEMTU:
6435 	case NETDEV_CHANGEADDR:
6436 	case NETDEV_CHANGENAME:
6437 	case NETDEV_FEAT_CHANGE:
6438 	case NETDEV_BONDING_FAILOVER:
6439 	case NETDEV_POST_TYPE_CHANGE:
6440 	case NETDEV_NOTIFY_PEERS:
6441 	case NETDEV_CHANGEUPPER:
6442 	case NETDEV_RESEND_IGMP:
6443 	case NETDEV_CHANGEINFODATA:
6444 	case NETDEV_CHANGELOWERSTATE:
6445 	case NETDEV_CHANGE_TX_QUEUE_LEN:
6446 		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6447 				   GFP_KERNEL, NULL, 0, 0, NULL);
6448 		break;
6449 	default:
6450 		break;
6451 	}
6452 	return NOTIFY_DONE;
6453 }
6454 
6455 static struct notifier_block rtnetlink_dev_notifier = {
6456 	.notifier_call	= rtnetlink_event,
6457 };
6458 
6460 static int __net_init rtnetlink_net_init(struct net *net)
6461 {
6462 	struct sock *sk;
6463 	struct netlink_kernel_cfg cfg = {
6464 		.groups		= RTNLGRP_MAX,
6465 		.input		= rtnetlink_rcv,
6466 		.cb_mutex	= &rtnl_mutex,
6467 		.flags		= NL_CFG_F_NONROOT_RECV,
6468 		.bind		= rtnetlink_bind,
6469 	};
6470 
6471 	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
6472 	if (!sk)
6473 		return -ENOMEM;
6474 	net->rtnl = sk;
6475 	return 0;
6476 }
6477 
6478 static void __net_exit rtnetlink_net_exit(struct net *net)
6479 {
6480 	netlink_kernel_release(net->rtnl);
6481 	net->rtnl = NULL;
6482 }
6483 
6484 static struct pernet_operations rtnetlink_net_ops = {
6485 	.init = rtnetlink_net_init,
6486 	.exit = rtnetlink_net_exit,
6487 };
6488 
6489 void __init rtnetlink_init(void)
6490 {
6491 	if (register_pernet_subsys(&rtnetlink_net_ops))
6492 		panic("rtnetlink_init: cannot initialize rtnetlink\n");
6493 
6494 	register_netdevice_notifier(&rtnetlink_dev_notifier);
6495 
6496 	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6497 		      rtnl_dump_ifinfo, 0);
6498 	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6499 	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6500 	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6501 
6502 	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6503 	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6504 	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6505 
6506 	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6507 	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6508 
6509 	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6510 	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6511 		      RTNL_FLAG_BULK_DEL_SUPPORTED);
6512 	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6513 
6514 	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6515 	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6516 	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6517 
6518 	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6519 		      0);
6520 	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
6521 
6522 	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, rtnl_mdb_dump, 0);
6523 	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
6524 	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
6525 }
6526
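
/* Every handler registered above passes flags 0 except the bridge FDB
 * delete, so they all run under rtnl_lock() via rtnetlink_rcv_msg();
 * RTNL_FLAG_BULK_DEL_SUPPORTED merely opts RTM_DELNEIGH into
 * NLM_F_BULK requests and does not affect locking.
 */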