// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
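
/* Usage sketch (illustrative, not from this file): code that runs under
 * RTNL, such as a qdisc reset, can hand a queued skb chain to
 * rtnl_kfree_skbs() instead of freeing each skb while holding the lock;
 * the deferred chain is then released in __rtnl_unlock() below after the
 * mutex is dropped ("qh" is a hypothetical queue head):
 *
 *	if (qh->head) {
 *		rtnl_kfree_skbs(qh->head, qh->tail);
 *		qh->head = NULL;
 *		qh->tail = NULL;
 *	}
 */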

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *     wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
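
/* Usage sketch (illustrative; "obj" and destroy() are hypothetical):
 * drop a reference and take the RTNL atomically with the final decrement,
 * so teardown of the object can proceed under the lock without racing
 * other reference holders:
 *
 *	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
 *		destroy(obj);
 *		rtnl_unlock();
 *	}
 */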

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
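
/* Usage sketch (illustrative): lockdep_rtnl_is_held() is the condition
 * behind helpers such as rtnl_dereference(); an equivalent open-coded
 * form for an RCU pointer that is only updated under RTNL would be:
 *
 *	p = rcu_dereference_protected(ptr, lockdep_rtnl_is_held());
 */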

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
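
/* Usage sketch (illustrative; the "foo" names are hypothetical): a module
 * typically registers its handlers from its init function and removes them
 * with rtnl_unregister() on exit:
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETLINK,
 *				   foo_getlink, foo_dumplink, 0);
 *	if (err)
 *		return err;
 */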

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
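
/* Usage sketch (illustrative; the "foo" names are hypothetical): a
 * virtual-device driver fills in rtnl_link_ops and registers them once at
 * module init; "ip link add ... type foo" then resolves to these ops via
 * rtnl_link_ops_get():
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,	// hypothetical setup callback
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 */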

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
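
/* Usage sketch (illustrative): an address family registers its per-link
 * IFLA_AF_SPEC fill/size callbacks once at init, the way ipv4 does for
 * AF_INET in net/ipv4/devinet.c:
 *
 *	static struct rtnl_af_ops inet_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = inet_fill_link_af,
 *		.get_link_af_size = inet_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&inet_af_ops);
 */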

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
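
/* Usage sketch (illustrative): a route dump handler emits its RTA_METRICS
 * nest with this helper; the metrics array is indexed from 0 while the
 * RTAX_* attribute types start at 1, hence the "i + 1" above. IPv4's
 * fib_dump_info()-style code does:
 *
 *	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 *		goto nla_put_failure;	// fi: a struct fib_info pointer
 */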

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
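
/* Usage sketch (illustrative): IPv6 route dumps append RTA_CACHEINFO this
 * way; "expires" is a signed jiffies delta, negative once already expired:
 *
 *	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
 *		goto nla_put_failure;
 */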

void netdev_set_operstate(struct net_device *dev, int newstate)
{
	unsigned int old = READ_ONCE(dev->operstate);

	do {
		if (old == newstate)
			return;
	} while (!try_cmpxchg(&dev->operstate, &old, newstate));

	netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = READ_ONCE(dev->operstate);

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	netdev_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
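
/* Worked example (illustrative): with ifi_change = IFF_UP and
 * ifi_flags = 0, only IFF_UP is cleared and every other flag keeps its
 * current value; with ifi_change = 0, the whole ifi_flags word is applied
 * as-is, preserving compatibility with old userspace that never set the
 * change mask.
 */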

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
		       vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	unsigned int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
		cnt++;
	rcu_read_unlock();

	if (!cnt)
		return 0;

	return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	/* Assume dev->proto_down_reason is not zero. */
	size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */

	size += dpll_netdev_pin_handle_size(dev);

	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + rtnl_dpll_pin_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       u32 ext_filter_mask)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		return -EMSGSIZE;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
		memset(&vf_stats, 0, sizeof(vf_stats));
		if (dev->netdev_ops->ndo_get_vf_stats)
			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
							  &vf_stats);
		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
		if (!vfstats)
			goto nla_put_vf_failure;
		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
			nla_nest_cancel(skb, vfstats);
			goto nla_put_vf_failure;
		}
		nla_nest_end(skb, vfstats);
	}
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
			nla_nest_cancel(skb, vfinfo);
			return -EMSGSIZE;
		}
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = READ_ONCE(dev->mem_start);
	map.mem_end = READ_ONCE(dev->mem_end);
	map.base_addr = READ_ONCE(dev->base_addr);
	map.irq = READ_ONCE(dev->irq);
	map.dma = READ_ONCE(dev->dma);
	map.port = READ_ONCE(dev->if_port);

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;
	u32 res = 0;

	rcu_read_lock();
	generic_xdp_prog = rcu_dereference(dev->xdp_prog);
	if (generic_xdp_prog)
		res = generic_xdp_prog->aux->id;
	rcu_read_unlock();

	return res;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER,
				  READ_ONCE(upper_dev->ifindex));

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int iflink = dev_get_iflink(dev);

	if (force || READ_ONCE(dev->ifindex) != iflink)
		return nla_put_u32(skb, IFLA_LINK, iflink);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
		goto nla_put_failure;

	preason = READ_ONCE(dev->proto_down_reason);
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_dpll_pin(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct nlattr *dpll_pin_nest;
	int ret;

	dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
	if (!dpll_pin_nest)
		return -EMSGSIZE;

	ret = dpll_netdev_add_pin_handle(skb, dev);
	if (ret < 0)
		goto nest_cancel;

	nla_nest_end(skb, dpll_pin_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, dpll_pin_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	char devname[IFNAMSIZ];
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = READ_ONCE(dev->type);
	ifm->ifi_index = READ_ONCE(dev->ifindex);
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	netdev_copy_name(dev, devname);
	if (nla_put_string(skb, IFLA_IFNAME, devname))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? READ_ONCE(dev->operstate) :
					    IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
	    nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
	    nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
			READ_ONCE(dev->num_tx_queues)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
			READ_ONCE(dev->gso_max_segs)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
			READ_ONCE(dev->gso_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
			READ_ONCE(dev->gro_max_size)) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gso_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gro_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
			READ_ONCE(dev->tso_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
			READ_ONCE(dev->tso_max_segs)) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
			READ_ONCE(dev->num_rx_queues)) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
		goto nla_put_failure_rcu;
	qdisc = rcu_dereference(dev->qdisc);
	if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
		goto nla_put_failure_rcu;
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure_rcu;
	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
1954 goto nla_put_failure;
1955
1956 if (dev->dev.parent && dev->dev.parent->bus &&
1957 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1958 dev->dev.parent->bus->name))
1959 goto nla_put_failure;
1960
1961 if (rtnl_fill_devlink_port(skb, dev))
1962 goto nla_put_failure;
1963
1964 if (rtnl_fill_dpll_pin(skb, dev))
1965 goto nla_put_failure;
1966
1967 nlmsg_end(skb, nlh);
1968 return 0;
1969
1970 nla_put_failure_rcu:
1971 rcu_read_unlock();
1972 nla_put_failure:
1973 nlmsg_cancel(skb, nlh);
1974 return -EMSGSIZE;
1975 }
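
/* Note the two error labels above: attribute fills that dereference
 * RCU-protected state (dev->qdisc, the per-family af_ops, the alt-name
 * property list) run under rcu_read_lock() and must unwind through
 * nla_put_failure_rcu so the read-side lock is dropped exactly once.
 * Sketch of the pattern (fill_rcu_protected() is hypothetical):
 *
 *	rcu_read_lock();
 *	if (fill_rcu_protected(skb, dev))
 *		goto nla_put_failure_rcu;
 *	rcu_read_unlock();
 *	...
 * nla_put_failure_rcu:
 *	rcu_read_unlock();
 *	// fall through to the plain nla_put_failure path
 */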
1976
1977 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1978 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1979 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1980 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1981 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1982 [IFLA_MTU] = { .type = NLA_U32 },
1983 [IFLA_LINK] = { .type = NLA_U32 },
1984 [IFLA_MASTER] = { .type = NLA_U32 },
1985 [IFLA_CARRIER] = { .type = NLA_U8 },
1986 [IFLA_TXQLEN] = { .type = NLA_U32 },
1987 [IFLA_WEIGHT] = { .type = NLA_U32 },
1988 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1989 [IFLA_LINKMODE] = { .type = NLA_U8 },
1990 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1991 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1992 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1993 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1994 * allow 0-length string (needed to remove an alias).
1995 */
1996 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1997 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1998 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1999 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
2000 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
2001 [IFLA_EXT_MASK] = { .type = NLA_U32 },
2002 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
2003 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
2004 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
2005 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
2006 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
2007 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2008 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
2009 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2010 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
2011 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
2012 [IFLA_XDP] = { .type = NLA_NESTED },
2013 [IFLA_EVENT] = { .type = NLA_U32 },
2014 [IFLA_GROUP] = { .type = NLA_U32 },
2015 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
2016 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
2017 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
2018 [IFLA_MIN_MTU] = { .type = NLA_U32 },
2019 [IFLA_MAX_MTU] = { .type = NLA_U32 },
2020 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
2021 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
2022 .len = ALTIFNAMSIZ - 1 },
2023 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
2024 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
2025 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
2026 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
2027 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
2028 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
2029 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
2030 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
2031 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2032 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2033 };
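
/* Each policy entry above bounds what userspace may send for the
 * corresponding IFLA_* attribute (NLA_REJECT marks kernel-output-only
 * attributes).  A hedged sketch, assuming fd is an open
 * socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE) socket, of a request this
 * policy would accept -- RTM_SETLINK changing the MTU of ifindex 2
 * (the layout works because every member here is 4-byte aligned):
 *
 *	struct {
 *		struct nlmsghdr  nh;
 *		struct ifinfomsg ifm;
 *		struct nlattr    mtu_attr;
 *		__u32            mtu;
 *	} req = {
 *		.nh.nlmsg_len      = sizeof(req),
 *		.nh.nlmsg_type     = RTM_SETLINK,
 *		.nh.nlmsg_flags    = NLM_F_REQUEST | NLM_F_ACK,
 *		.ifm.ifi_family    = AF_UNSPEC,
 *		.ifm.ifi_index     = 2,
 *		.mtu_attr.nla_len  = NLA_HDRLEN + sizeof(__u32),
 *		.mtu_attr.nla_type = IFLA_MTU,
 *		.mtu               = 1400,
 *	};
 *	send(fd, &req, sizeof(req), 0);
 */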
2034
2035 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2036 [IFLA_INFO_KIND] = { .type = NLA_STRING },
2037 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
2038 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
2039 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
2040 };
2041
2042 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
2043 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
2044 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
2045 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
2046 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
2047 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
2048 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
2049 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
2050 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
2051 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
2052 [IFLA_VF_STATS] = { .type = NLA_NESTED },
2053 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
2054 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2055 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2056 };
2057
2058 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2059 [IFLA_PORT_VF] = { .type = NLA_U32 },
2060 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2061 .len = PORT_PROFILE_MAX },
2062 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2063 .len = PORT_UUID_MAX },
2064 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2065 .len = PORT_UUID_MAX },
2066 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2067 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
2068
2069 /* Unused, but we need to keep it here since user space could
2070 * fill it. It's also broken with regard to NLA_BINARY use in
2071 * combination with structs.
2072 */
2073 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2074 .len = sizeof(struct ifla_port_vsi) },
2075 };
2076
2077 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2078 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2079 [IFLA_XDP_FD] = { .type = NLA_S32 },
2080 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
2081 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
2082 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
2083 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
2084 };
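
/* strict_start_type above means attributes of type IFLA_XDP_EXPECTED_FD
 * and higher are validated strictly even when the enclosing message is
 * parsed with one of the deprecated (lenient) parsers, while the older
 * XDP attributes keep their historical lenient handling.
 */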
2085
2086 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2087 {
2088 const struct rtnl_link_ops *ops = NULL;
2089 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2090
2091 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2092 return NULL;
2093
2094 if (linfo[IFLA_INFO_KIND]) {
2095 char kind[MODULE_NAME_LEN];
2096
2097 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2098 ops = rtnl_link_ops_get(kind);
2099 }
2100
2101 return ops;
2102 }
2103
2104 static bool link_master_filtered(struct net_device *dev, int master_idx)
2105 {
2106 struct net_device *master;
2107
2108 if (!master_idx)
2109 return false;
2110
2111 master = netdev_master_upper_dev_get(dev);
2112
2113 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need
2114 * another invalid value for ifindex to denote "no master".
2115 */
2116 if (master_idx == -1)
2117 return !!master;
2118
2119 if (!master || master->ifindex != master_idx)
2120 return true;
2121
2122 return false;
2123 }
2124
2125 static bool link_kind_filtered(const struct net_device *dev,
2126 const struct rtnl_link_ops *kind_ops)
2127 {
2128 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2129 return true;
2130
2131 return false;
2132 }
2133
2134 static bool link_dump_filtered(struct net_device *dev,
2135 int master_idx,
2136 const struct rtnl_link_ops *kind_ops)
2137 {
2138 if (link_master_filtered(dev, master_idx) ||
2139 link_kind_filtered(dev, kind_ops))
2140 return true;
2141
2142 return false;
2143 }
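
/* These three helpers back NLM_F_DUMP_FILTERED link dumps: a dump
 * request may carry IFLA_MASTER (a master ifindex; -1 selects devices
 * that have no master at all) and/or IFLA_LINKINFO with IFLA_INFO_KIND,
 * as sent by e.g.:
 *	ip link show master br0
 *	ip link show type vlan
 */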
2144
2145 /**
2146 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2147 * @sk: netlink socket
2148 * @netnsid: network namespace identifier
2149 *
2150 * Returns the network namespace identified by netnsid on success or an error
2151 * pointer on failure.
2152 */
2153 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2154 {
2155 struct net *net;
2156
2157 net = get_net_ns_by_id(sock_net(sk), netnsid);
2158 if (!net)
2159 return ERR_PTR(-EINVAL);
2160
2161 /* For now, the caller is required to have CAP_NET_ADMIN in
2162 * the user namespace owning the target net ns.
2163 */
2164 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2165 put_net(net);
2166 return ERR_PTR(-EACCES);
2167 }
2168 return net;
2169 }
2170 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2171
2172 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2173 bool strict_check, struct nlattr **tb,
2174 struct netlink_ext_ack *extack)
2175 {
2176 int hdrlen;
2177
2178 if (strict_check) {
2179 struct ifinfomsg *ifm;
2180
2181 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2182 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2183 return -EINVAL;
2184 }
2185
2186 ifm = nlmsg_data(nlh);
2187 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2188 ifm->ifi_change) {
2189 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2190 return -EINVAL;
2191 }
2192 if (ifm->ifi_index) {
2193 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2194 return -EINVAL;
2195 }
2196
2197 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2198 IFLA_MAX, ifla_policy,
2199 extack);
2200 }
2201
2202 /* A hack to preserve kernel<->userspace interface.
2203 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2204 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2205 * what iproute2 < v3.9.0 used.
2206 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2207 * attribute, its netlink message is shorter than struct ifinfomsg.
2208 */
2209 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2210 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2211
2212 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2213 extack);
2214 }
2215
2216 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2217 {
2218 const struct rtnl_link_ops *kind_ops = NULL;
2219 struct netlink_ext_ack *extack = cb->extack;
2220 const struct nlmsghdr *nlh = cb->nlh;
2221 struct net *net = sock_net(skb->sk);
2222 unsigned int flags = NLM_F_MULTI;
2223 struct nlattr *tb[IFLA_MAX+1];
2224 struct {
2225 unsigned long ifindex;
2226 } *ctx = (void *)cb->ctx;
2227 struct net *tgt_net = net;
2228 u32 ext_filter_mask = 0;
2229 struct net_device *dev;
2230 int master_idx = 0;
2231 int netnsid = -1;
2232 int err, i;
2233
2234 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2235 if (err < 0) {
2236 if (cb->strict_check)
2237 return err;
2238
2239 goto walk_entries;
2240 }
2241
2242 for (i = 0; i <= IFLA_MAX; ++i) {
2243 if (!tb[i])
2244 continue;
2245
2246 /* new attributes should only be added with strict checking */
2247 switch (i) {
2248 case IFLA_TARGET_NETNSID:
2249 netnsid = nla_get_s32(tb[i]);
2250 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2251 if (IS_ERR(tgt_net)) {
2252 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2253 return PTR_ERR(tgt_net);
2254 }
2255 break;
2256 case IFLA_EXT_MASK:
2257 ext_filter_mask = nla_get_u32(tb[i]);
2258 break;
2259 case IFLA_MASTER:
2260 master_idx = nla_get_u32(tb[i]);
2261 break;
2262 case IFLA_LINKINFO:
2263 kind_ops = linkinfo_to_kind_ops(tb[i]);
2264 break;
2265 default:
2266 if (cb->strict_check) {
2267 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2268 return -EINVAL;
2269 }
2270 }
2271 }
2272
2273 if (master_idx || kind_ops)
2274 flags |= NLM_F_DUMP_FILTERED;
2275
2276 walk_entries:
2277 err = 0;
2278 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2279 if (link_dump_filtered(dev, master_idx, kind_ops))
2280 continue;
2281 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2282 NETLINK_CB(cb->skb).portid,
2283 nlh->nlmsg_seq, 0, flags,
2284 ext_filter_mask, 0, NULL, 0,
2285 netnsid, GFP_KERNEL);
2286 if (err < 0)
2287 break;
2288 }
2289 cb->seq = tgt_net->dev_base_seq;
2290 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2291 if (netnsid >= 0)
2292 put_net(tgt_net);
2293
2294 return err;
2295 }
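
/* A hedged sketch of the userspace side of this dump, again assuming fd
 * is a NETLINK_ROUTE socket: RTM_GETLINK with NLM_F_DUMP yields a
 * multi-part stream of RTM_NEWLINK messages terminated by NLMSG_DONE:
 *
 *	struct {
 *		struct nlmsghdr  nh;
 *		struct ifinfomsg ifm;
 *	} req = {
 *		.nh.nlmsg_len   = sizeof(req),
 *		.nh.nlmsg_type  = RTM_GETLINK,
 *		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ifm.ifi_family = AF_UNSPEC,
 *	};
 *	send(fd, &req, sizeof(req), 0);
 *	// then recv() in a loop until an NLMSG_DONE header arrives
 */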
2296
2297 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2298 struct netlink_ext_ack *exterr)
2299 {
2300 const struct ifinfomsg *ifmp;
2301 const struct nlattr *attrs;
2302 size_t len;
2303
2304 ifmp = nla_data(nla_peer);
2305 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2306 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2307
2308 if (ifmp->ifi_index < 0) {
2309 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2310 "ifindex can't be negative");
2311 return -EINVAL;
2312 }
2313
2314 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2315 exterr);
2316 }
2317 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2318
2319 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2320 {
2321 struct net *net;
2322 /* Examine the link attributes and figure out which
2323 * network namespace we are talking about.
2324 */
2325 if (tb[IFLA_NET_NS_PID])
2326 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2327 else if (tb[IFLA_NET_NS_FD])
2328 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2329 else
2330 net = get_net(src_net);
2331 return net;
2332 }
2333 EXPORT_SYMBOL(rtnl_link_get_net);
2334
2335 /* Figure out which network namespace we are talking about by
2336 * examining the link attributes in the following order:
2337 *
2338 * 1. IFLA_NET_NS_PID
2339 * 2. IFLA_NET_NS_FD
2340 * 3. IFLA_TARGET_NETNSID
2341 */
2342 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2343 struct nlattr *tb[])
2344 {
2345 struct net *net;
2346
2347 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2348 return rtnl_link_get_net(src_net, tb);
2349
2350 if (!tb[IFLA_TARGET_NETNSID])
2351 return get_net(src_net);
2352
2353 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2354 if (!net)
2355 return ERR_PTR(-EINVAL);
2356
2357 return net;
2358 }
2359
2360 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2361 struct net *src_net,
2362 struct nlattr *tb[], int cap)
2363 {
2364 struct net *net;
2365
2366 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2367 if (IS_ERR(net))
2368 return net;
2369
2370 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2371 put_net(net);
2372 return ERR_PTR(-EPERM);
2373 }
2374
2375 return net;
2376 }
2377
2378 /* Verify that rtnetlink requests do not pass additional properties
2379 * potentially referring to different network namespaces.
2380 */
2381 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2382 struct netlink_ext_ack *extack,
2383 bool netns_id_only)
2384 {
2385
2386 if (netns_id_only) {
2387 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2388 return 0;
2389
2390 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2391 return -EOPNOTSUPP;
2392 }
2393
2394 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2395 goto invalid_attr;
2396
2397 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2398 goto invalid_attr;
2399
2400 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2401 goto invalid_attr;
2402
2403 return 0;
2404
2405 invalid_attr:
2406 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2407 return -EINVAL;
2408 }
2409
2410 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2411 int max_tx_rate)
2412 {
2413 const struct net_device_ops *ops = dev->netdev_ops;
2414
2415 if (!ops->ndo_set_vf_rate)
2416 return -EOPNOTSUPP;
2417 if (max_tx_rate && max_tx_rate < min_tx_rate)
2418 return -EINVAL;
2419
2420 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2421 }
2422
2423 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2424 struct netlink_ext_ack *extack)
2425 {
2426 if (tb[IFLA_ADDRESS] &&
2427 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2428 return -EINVAL;
2429
2430 if (tb[IFLA_BROADCAST] &&
2431 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2432 return -EINVAL;
2433
2434 if (tb[IFLA_GSO_MAX_SIZE] &&
2435 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2436 NL_SET_ERR_MSG(extack, "too big gso_max_size");
2437 return -EINVAL;
2438 }
2439
2440 if (tb[IFLA_GSO_MAX_SEGS] &&
2441 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2442 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2443 NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2444 return -EINVAL;
2445 }
2446
2447 if (tb[IFLA_GRO_MAX_SIZE] &&
2448 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2449 NL_SET_ERR_MSG(extack, "too big gro_max_size");
2450 return -EINVAL;
2451 }
2452
2453 if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2454 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2455 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2456 return -EINVAL;
2457 }
2458
2459 if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2460 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2461 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2462 return -EINVAL;
2463 }
2464
2465 if (tb[IFLA_AF_SPEC]) {
2466 struct nlattr *af;
2467 int rem, err;
2468
2469 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2470 const struct rtnl_af_ops *af_ops;
2471
2472 af_ops = rtnl_af_lookup(nla_type(af));
2473 if (!af_ops)
2474 return -EAFNOSUPPORT;
2475
2476 if (!af_ops->set_link_af)
2477 return -EOPNOTSUPP;
2478
2479 if (af_ops->validate_link_af) {
2480 err = af_ops->validate_link_af(dev, af, extack);
2481 if (err < 0)
2482 return err;
2483 }
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2491 int guid_type)
2492 {
2493 const struct net_device_ops *ops = dev->netdev_ops;
2494
2495 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2496 }
2497
2498 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2499 {
2500 if (dev->type != ARPHRD_INFINIBAND)
2501 return -EOPNOTSUPP;
2502
2503 return handle_infiniband_guid(dev, ivt, guid_type);
2504 }
2505
2506 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2507 {
2508 const struct net_device_ops *ops = dev->netdev_ops;
2509 int err = -EINVAL;
2510
2511 if (tb[IFLA_VF_MAC]) {
2512 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2513
2514 if (ivm->vf >= INT_MAX)
2515 return -EINVAL;
2516 err = -EOPNOTSUPP;
2517 if (ops->ndo_set_vf_mac)
2518 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2519 ivm->mac);
2520 if (err < 0)
2521 return err;
2522 }
2523
2524 if (tb[IFLA_VF_VLAN]) {
2525 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2526
2527 if (ivv->vf >= INT_MAX)
2528 return -EINVAL;
2529 err = -EOPNOTSUPP;
2530 if (ops->ndo_set_vf_vlan)
2531 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2532 ivv->qos,
2533 htons(ETH_P_8021Q));
2534 if (err < 0)
2535 return err;
2536 }
2537
2538 if (tb[IFLA_VF_VLAN_LIST]) {
2539 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2540 struct nlattr *attr;
2541 int rem, len = 0;
2542
2543 err = -EOPNOTSUPP;
2544 if (!ops->ndo_set_vf_vlan)
2545 return err;
2546
2547 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2548 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2549 nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
2550 return -EINVAL;
2551 }
2552 if (len >= MAX_VLAN_LIST_LEN)
2553 return -EOPNOTSUPP;
2554 ivvl[len] = nla_data(attr);
2555
2556 len++;
2557 }
2558 if (len == 0)
2559 return -EINVAL;
2560
2561 if (ivvl[0]->vf >= INT_MAX)
2562 return -EINVAL;
2563 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2564 ivvl[0]->qos, ivvl[0]->vlan_proto);
2565 if (err < 0)
2566 return err;
2567 }
2568
2569 if (tb[IFLA_VF_TX_RATE]) {
2570 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2571 struct ifla_vf_info ivf;
2572
2573 if (ivt->vf >= INT_MAX)
2574 return -EINVAL;
2575 err = -EOPNOTSUPP;
2576 if (ops->ndo_get_vf_config)
2577 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2578 if (err < 0)
2579 return err;
2580
2581 err = rtnl_set_vf_rate(dev, ivt->vf,
2582 ivf.min_tx_rate, ivt->rate);
2583 if (err < 0)
2584 return err;
2585 }
2586
2587 if (tb[IFLA_VF_RATE]) {
2588 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2589
2590 if (ivt->vf >= INT_MAX)
2591 return -EINVAL;
2592
2593 err = rtnl_set_vf_rate(dev, ivt->vf,
2594 ivt->min_tx_rate, ivt->max_tx_rate);
2595 if (err < 0)
2596 return err;
2597 }
2598
2599 if (tb[IFLA_VF_SPOOFCHK]) {
2600 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2601
2602 if (ivs->vf >= INT_MAX)
2603 return -EINVAL;
2604 err = -EOPNOTSUPP;
2605 if (ops->ndo_set_vf_spoofchk)
2606 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2607 ivs->setting);
2608 if (err < 0)
2609 return err;
2610 }
2611
2612 if (tb[IFLA_VF_LINK_STATE]) {
2613 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2614
2615 if (ivl->vf >= INT_MAX)
2616 return -EINVAL;
2617 err = -EOPNOTSUPP;
2618 if (ops->ndo_set_vf_link_state)
2619 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2620 ivl->link_state);
2621 if (err < 0)
2622 return err;
2623 }
2624
2625 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2626 struct ifla_vf_rss_query_en *ivrssq_en;
2627
2628 err = -EOPNOTSUPP;
2629 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2630 if (ivrssq_en->vf >= INT_MAX)
2631 return -EINVAL;
2632 if (ops->ndo_set_vf_rss_query_en)
2633 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2634 ivrssq_en->setting);
2635 if (err < 0)
2636 return err;
2637 }
2638
2639 if (tb[IFLA_VF_TRUST]) {
2640 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2641
2642 if (ivt->vf >= INT_MAX)
2643 return -EINVAL;
2644 err = -EOPNOTSUPP;
2645 if (ops->ndo_set_vf_trust)
2646 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2647 if (err < 0)
2648 return err;
2649 }
2650
2651 if (tb[IFLA_VF_IB_NODE_GUID]) {
2652 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2653
2654 if (ivt->vf >= INT_MAX)
2655 return -EINVAL;
2656 if (!ops->ndo_set_vf_guid)
2657 return -EOPNOTSUPP;
2658 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2659 }
2660
2661 if (tb[IFLA_VF_IB_PORT_GUID]) {
2662 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2663
2664 if (ivt->vf >= INT_MAX)
2665 return -EINVAL;
2666 if (!ops->ndo_set_vf_guid)
2667 return -EOPNOTSUPP;
2668
2669 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2670 }
2671
2672 return err;
2673 }
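
/* The VF attributes handled above arrive doubly nested; do_setlink()
 * walks IFLA_VFINFO_LIST, parses each IFLA_VF_INFO against
 * ifla_vf_policy and hands the result here, so every branch sees at
 * most one attribute of its type per call:
 *
 *	IFLA_VFINFO_LIST
 *	  IFLA_VF_INFO			(one nest per VF)
 *	    IFLA_VF_MAC			struct ifla_vf_mac
 *	    IFLA_VF_VLAN		struct ifla_vf_vlan
 *	    IFLA_VF_RATE		struct ifla_vf_rate
 *	    ...
 */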
2674
2675 static int do_set_master(struct net_device *dev, int ifindex,
2676 struct netlink_ext_ack *extack)
2677 {
2678 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2679 const struct net_device_ops *ops;
2680 int err;
2681
2682 if (upper_dev) {
2683 if (upper_dev->ifindex == ifindex)
2684 return 0;
2685 ops = upper_dev->netdev_ops;
2686 if (ops->ndo_del_slave) {
2687 err = ops->ndo_del_slave(upper_dev, dev);
2688 if (err)
2689 return err;
2690 } else {
2691 return -EOPNOTSUPP;
2692 }
2693 }
2694
2695 if (ifindex) {
2696 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2697 if (!upper_dev)
2698 return -EINVAL;
2699 ops = upper_dev->netdev_ops;
2700 if (ops->ndo_add_slave) {
2701 err = ops->ndo_add_slave(upper_dev, dev, extack);
2702 if (err)
2703 return err;
2704 } else {
2705 return -EOPNOTSUPP;
2706 }
2707 }
2708 return 0;
2709 }
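
/* IFLA_MASTER therefore acts as "replace": an existing master is
 * detached through its ndo_del_slave before the new one is attached,
 * and an ifindex of 0 means plain removal.  Both iproute2 forms end
 * up here:
 *	ip link set dev eth0 master br0    -> IFLA_MASTER = br0's ifindex
 *	ip link set dev eth0 nomaster      -> IFLA_MASTER = 0
 */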
2710
2711 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2712 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2713 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2714 };
2715
2716 static int do_set_proto_down(struct net_device *dev,
2717 struct nlattr *nl_proto_down,
2718 struct nlattr *nl_proto_down_reason,
2719 struct netlink_ext_ack *extack)
2720 {
2721 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2722 unsigned long mask = 0;
2723 u32 value;
2724 bool proto_down;
2725 int err;
2726
2727 if (!dev->change_proto_down) {
2728 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2729 return -EOPNOTSUPP;
2730 }
2731
2732 if (nl_proto_down_reason) {
2733 err = nla_parse_nested_deprecated(pdreason,
2734 IFLA_PROTO_DOWN_REASON_MAX,
2735 nl_proto_down_reason,
2736 ifla_proto_down_reason_policy,
2737 NULL);
2738 if (err < 0)
2739 return err;
2740
2741 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2742 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2743 return -EINVAL;
2744 }
2745
2746 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2747
2748 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2749 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2750
2751 dev_change_proto_down_reason(dev, mask, value);
2752 }
2753
2754 if (nl_proto_down) {
2755 proto_down = nla_get_u8(nl_proto_down);
2756
2757 /* Don't turn off protodown if there are active reasons */
2758 if (!proto_down && dev->proto_down_reason) {
2759 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2760 return -EBUSY;
2761 }
2762 err = dev_change_proto_down(dev,
2763 proto_down);
2764 if (err)
2765 return err;
2766 }
2767
2768 return 0;
2769 }
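
/* Proto-down reasons form a bitmask so that independent subsystems can
 * each own a bit: IFLA_PROTO_DOWN_REASON_MASK selects which bits to
 * touch and ..._VALUE supplies their new values.  A minimal sketch of
 * flipping bit 0 while leaving all other reason bits alone:
 *
 *	dev_change_proto_down_reason(dev, BIT(0), BIT(0));  // set bit 0
 *	dev_change_proto_down_reason(dev, BIT(0), 0);       // clear bit 0
 */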
2770
2771 #define DO_SETLINK_MODIFIED 0x01
2772 /* notify flag means notify + modified. */
2773 #define DO_SETLINK_NOTIFY 0x03
2774 static int do_setlink(const struct sk_buff *skb,
2775 struct net_device *dev, struct ifinfomsg *ifm,
2776 struct netlink_ext_ack *extack,
2777 struct nlattr **tb, int status)
2778 {
2779 const struct net_device_ops *ops = dev->netdev_ops;
2780 char ifname[IFNAMSIZ];
2781 int err;
2782
2783 if (tb[IFLA_IFNAME])
2784 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2785 else
2786 ifname[0] = '\0';
2787
2788 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2789 const char *pat = ifname[0] ? ifname : NULL;
2790 struct net *net;
2791 int new_ifindex;
2792
2793 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2794 tb, CAP_NET_ADMIN);
2795 if (IS_ERR(net)) {
2796 err = PTR_ERR(net);
2797 goto errout;
2798 }
2799
2800 if (tb[IFLA_NEW_IFINDEX])
2801 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2802 else
2803 new_ifindex = 0;
2804
2805 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2806 put_net(net);
2807 if (err)
2808 goto errout;
2809 status |= DO_SETLINK_MODIFIED;
2810 }
2811
2812 if (tb[IFLA_MAP]) {
2813 struct rtnl_link_ifmap *u_map;
2814 struct ifmap k_map;
2815
2816 if (!ops->ndo_set_config) {
2817 err = -EOPNOTSUPP;
2818 goto errout;
2819 }
2820
2821 if (!netif_device_present(dev)) {
2822 err = -ENODEV;
2823 goto errout;
2824 }
2825
2826 u_map = nla_data(tb[IFLA_MAP]);
2827 k_map.mem_start = (unsigned long) u_map->mem_start;
2828 k_map.mem_end = (unsigned long) u_map->mem_end;
2829 k_map.base_addr = (unsigned short) u_map->base_addr;
2830 k_map.irq = (unsigned char) u_map->irq;
2831 k_map.dma = (unsigned char) u_map->dma;
2832 k_map.port = (unsigned char) u_map->port;
2833
2834 err = ops->ndo_set_config(dev, &k_map);
2835 if (err < 0)
2836 goto errout;
2837
2838 status |= DO_SETLINK_NOTIFY;
2839 }
2840
2841 if (tb[IFLA_ADDRESS]) {
2842 struct sockaddr *sa;
2843 int len;
2844
2845 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2846 sizeof(*sa));
2847 sa = kmalloc(len, GFP_KERNEL);
2848 if (!sa) {
2849 err = -ENOMEM;
2850 goto errout;
2851 }
2852 sa->sa_family = dev->type;
2853 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2854 dev->addr_len);
2855 err = dev_set_mac_address_user(dev, sa, extack);
2856 kfree(sa);
2857 if (err)
2858 goto errout;
2859 status |= DO_SETLINK_MODIFIED;
2860 }
2861
2862 if (tb[IFLA_MTU]) {
2863 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2864 if (err < 0)
2865 goto errout;
2866 status |= DO_SETLINK_MODIFIED;
2867 }
2868
2869 if (tb[IFLA_GROUP]) {
2870 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2871 status |= DO_SETLINK_NOTIFY;
2872 }
2873
2874 /*
2875 * Interface selected by interface index but interface
2876 * name provided implies that a name change has been
2877 * requested.
2878 */
2879 if (ifm->ifi_index > 0 && ifname[0]) {
2880 err = dev_change_name(dev, ifname);
2881 if (err < 0)
2882 goto errout;
2883 status |= DO_SETLINK_MODIFIED;
2884 }
2885
2886 if (tb[IFLA_IFALIAS]) {
2887 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2888 nla_len(tb[IFLA_IFALIAS]));
2889 if (err < 0)
2890 goto errout;
2891 status |= DO_SETLINK_NOTIFY;
2892 }
2893
2894 if (tb[IFLA_BROADCAST]) {
2895 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2896 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2897 }
2898
2899 if (ifm->ifi_flags || ifm->ifi_change) {
2900 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2901 extack);
2902 if (err < 0)
2903 goto errout;
2904 }
2905
2906 if (tb[IFLA_MASTER]) {
2907 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2908 if (err)
2909 goto errout;
2910 status |= DO_SETLINK_MODIFIED;
2911 }
2912
2913 if (tb[IFLA_CARRIER]) {
2914 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2915 if (err)
2916 goto errout;
2917 status |= DO_SETLINK_MODIFIED;
2918 }
2919
2920 if (tb[IFLA_TXQLEN]) {
2921 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2922
2923 err = dev_change_tx_queue_len(dev, value);
2924 if (err)
2925 goto errout;
2926 status |= DO_SETLINK_MODIFIED;
2927 }
2928
2929 if (tb[IFLA_GSO_MAX_SIZE]) {
2930 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2931
2932 if (dev->gso_max_size ^ max_size) {
2933 netif_set_gso_max_size(dev, max_size);
2934 status |= DO_SETLINK_MODIFIED;
2935 }
2936 }
2937
2938 if (tb[IFLA_GSO_MAX_SEGS]) {
2939 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2940
2941 if (dev->gso_max_segs ^ max_segs) {
2942 netif_set_gso_max_segs(dev, max_segs);
2943 status |= DO_SETLINK_MODIFIED;
2944 }
2945 }
2946
2947 if (tb[IFLA_GRO_MAX_SIZE]) {
2948 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2949
2950 if (dev->gro_max_size ^ gro_max_size) {
2951 netif_set_gro_max_size(dev, gro_max_size);
2952 status |= DO_SETLINK_MODIFIED;
2953 }
2954 }
2955
2956 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2957 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2958
2959 if (dev->gso_ipv4_max_size ^ max_size) {
2960 netif_set_gso_ipv4_max_size(dev, max_size);
2961 status |= DO_SETLINK_MODIFIED;
2962 }
2963 }
2964
2965 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2966 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2967
2968 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2969 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2970 status |= DO_SETLINK_MODIFIED;
2971 }
2972 }
2973
2974 if (tb[IFLA_OPERSTATE])
2975 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2976
2977 if (tb[IFLA_LINKMODE]) {
2978 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2979
2980 if (dev->link_mode ^ value)
2981 status |= DO_SETLINK_NOTIFY;
2982 WRITE_ONCE(dev->link_mode, value);
2983 }
2984
2985 if (tb[IFLA_VFINFO_LIST]) {
2986 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2987 struct nlattr *attr;
2988 int rem;
2989
2990 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2991 if (nla_type(attr) != IFLA_VF_INFO ||
2992 nla_len(attr) < NLA_HDRLEN) {
2993 err = -EINVAL;
2994 goto errout;
2995 }
2996 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2997 attr,
2998 ifla_vf_policy,
2999 NULL);
3000 if (err < 0)
3001 goto errout;
3002 err = do_setvfinfo(dev, vfinfo);
3003 if (err < 0)
3004 goto errout;
3005 status |= DO_SETLINK_NOTIFY;
3006 }
3007 }
3008 err = 0;
3009
3010 if (tb[IFLA_VF_PORTS]) {
3011 struct nlattr *port[IFLA_PORT_MAX+1];
3012 struct nlattr *attr;
3013 int vf;
3014 int rem;
3015
3016 err = -EOPNOTSUPP;
3017 if (!ops->ndo_set_vf_port)
3018 goto errout;
3019
3020 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
3021 if (nla_type(attr) != IFLA_VF_PORT ||
3022 nla_len(attr) < NLA_HDRLEN) {
3023 err = -EINVAL;
3024 goto errout;
3025 }
3026 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3027 attr,
3028 ifla_port_policy,
3029 NULL);
3030 if (err < 0)
3031 goto errout;
3032 if (!port[IFLA_PORT_VF]) {
3033 err = -EOPNOTSUPP;
3034 goto errout;
3035 }
3036 vf = nla_get_u32(port[IFLA_PORT_VF]);
3037 err = ops->ndo_set_vf_port(dev, vf, port);
3038 if (err < 0)
3039 goto errout;
3040 status |= DO_SETLINK_NOTIFY;
3041 }
3042 }
3043 err = 0;
3044
3045 if (tb[IFLA_PORT_SELF]) {
3046 struct nlattr *port[IFLA_PORT_MAX+1];
3047
3048 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3049 tb[IFLA_PORT_SELF],
3050 ifla_port_policy, NULL);
3051 if (err < 0)
3052 goto errout;
3053
3054 err = -EOPNOTSUPP;
3055 if (ops->ndo_set_vf_port)
3056 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3057 if (err < 0)
3058 goto errout;
3059 status |= DO_SETLINK_NOTIFY;
3060 }
3061
3062 if (tb[IFLA_AF_SPEC]) {
3063 struct nlattr *af;
3064 int rem;
3065
3066 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3067 const struct rtnl_af_ops *af_ops;
3068
3069 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
3070
3071 err = af_ops->set_link_af(dev, af, extack);
3072 if (err < 0)
3073 goto errout;
3074
3075 status |= DO_SETLINK_NOTIFY;
3076 }
3077 }
3078 err = 0;
3079
3080 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3081 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3082 tb[IFLA_PROTO_DOWN_REASON], extack);
3083 if (err)
3084 goto errout;
3085 status |= DO_SETLINK_NOTIFY;
3086 }
3087
3088 if (tb[IFLA_XDP]) {
3089 struct nlattr *xdp[IFLA_XDP_MAX + 1];
3090 u32 xdp_flags = 0;
3091
3092 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3093 tb[IFLA_XDP],
3094 ifla_xdp_policy, NULL);
3095 if (err < 0)
3096 goto errout;
3097
3098 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3099 err = -EINVAL;
3100 goto errout;
3101 }
3102
3103 if (xdp[IFLA_XDP_FLAGS]) {
3104 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3105 if (xdp_flags & ~XDP_FLAGS_MASK) {
3106 err = -EINVAL;
3107 goto errout;
3108 }
3109 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3110 err = -EINVAL;
3111 goto errout;
3112 }
3113 }
3114
3115 if (xdp[IFLA_XDP_FD]) {
3116 int expected_fd = -1;
3117
3118 if (xdp_flags & XDP_FLAGS_REPLACE) {
3119 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3120 err = -EINVAL;
3121 goto errout;
3122 }
3123 expected_fd =
3124 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3125 }
3126
3127 err = dev_change_xdp_fd(dev, extack,
3128 nla_get_s32(xdp[IFLA_XDP_FD]),
3129 expected_fd,
3130 xdp_flags);
3131 if (err)
3132 goto errout;
3133 status |= DO_SETLINK_NOTIFY;
3134 }
3135 }
3136
3137 errout:
3138 if (status & DO_SETLINK_MODIFIED) {
3139 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3140 netdev_state_change(dev);
3141
3142 if (err < 0)
3143 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3144 dev->name);
3145 }
3146
3147 return err;
3148 }
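
/* The status word separates "something changed" from "a generic netdev
 * state change must be announced": DO_SETLINK_NOTIFY includes the
 * MODIFIED bit, so netdev_state_change() above fires only for changes
 * that do not already emit a more specific notification (MTU, hardware
 * address and namespace changes notify on their own and set only
 * MODIFIED).
 */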
3149
3150 static struct net_device *rtnl_dev_get(struct net *net,
3151 struct nlattr *tb[])
3152 {
3153 char ifname[ALTIFNAMSIZ];
3154
3155 if (tb[IFLA_IFNAME])
3156 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3157 else if (tb[IFLA_ALT_IFNAME])
3158 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3159 else
3160 return NULL;
3161
3162 return __dev_get_by_name(net, ifname);
3163 }
3164
3165 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3166 struct netlink_ext_ack *extack)
3167 {
3168 struct net *net = sock_net(skb->sk);
3169 struct ifinfomsg *ifm;
3170 struct net_device *dev;
3171 int err;
3172 struct nlattr *tb[IFLA_MAX+1];
3173
3174 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3175 ifla_policy, extack);
3176 if (err < 0)
3177 goto errout;
3178
3179 err = rtnl_ensure_unique_netns(tb, extack, false);
3180 if (err < 0)
3181 goto errout;
3182
3183 err = -EINVAL;
3184 ifm = nlmsg_data(nlh);
3185 if (ifm->ifi_index > 0)
3186 dev = __dev_get_by_index(net, ifm->ifi_index);
3187 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3188 dev = rtnl_dev_get(net, tb);
3189 else
3190 goto errout;
3191
3192 if (dev == NULL) {
3193 err = -ENODEV;
3194 goto errout;
3195 }
3196
3197 err = validate_linkmsg(dev, tb, extack);
3198 if (err < 0)
3199 goto errout;
3200
3201 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3202 errout:
3203 return err;
3204 }
3205
3206 static int rtnl_group_dellink(const struct net *net, int group)
3207 {
3208 struct net_device *dev, *aux;
3209 LIST_HEAD(list_kill);
3210 bool found = false;
3211
3212 if (!group)
3213 return -EPERM;
3214
3215 for_each_netdev(net, dev) {
3216 if (dev->group == group) {
3217 const struct rtnl_link_ops *ops;
3218
3219 found = true;
3220 ops = dev->rtnl_link_ops;
3221 if (!ops || !ops->dellink)
3222 return -EOPNOTSUPP;
3223 }
3224 }
3225
3226 if (!found)
3227 return -ENODEV;
3228
3229 for_each_netdev_safe(net, dev, aux) {
3230 if (dev->group == group) {
3231 const struct rtnl_link_ops *ops;
3232
3233 ops = dev->rtnl_link_ops;
3234 ops->dellink(dev, &list_kill);
3235 }
3236 }
3237 unregister_netdevice_many(&list_kill);
3238
3239 return 0;
3240 }
3241
3242 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3243 {
3244 const struct rtnl_link_ops *ops;
3245 LIST_HEAD(list_kill);
3246
3247 ops = dev->rtnl_link_ops;
3248 if (!ops || !ops->dellink)
3249 return -EOPNOTSUPP;
3250
3251 ops->dellink(dev, &list_kill);
3252 unregister_netdevice_many_notify(&list_kill, portid, nlh);
3253
3254 return 0;
3255 }
3256 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3257
3258 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3259 struct netlink_ext_ack *extack)
3260 {
3261 struct net *net = sock_net(skb->sk);
3262 u32 portid = NETLINK_CB(skb).portid;
3263 struct net *tgt_net = net;
3264 struct net_device *dev = NULL;
3265 struct ifinfomsg *ifm;
3266 struct nlattr *tb[IFLA_MAX+1];
3267 int err;
3268 int netnsid = -1;
3269
3270 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3271 ifla_policy, extack);
3272 if (err < 0)
3273 return err;
3274
3275 err = rtnl_ensure_unique_netns(tb, extack, true);
3276 if (err < 0)
3277 return err;
3278
3279 if (tb[IFLA_TARGET_NETNSID]) {
3280 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3281 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3282 if (IS_ERR(tgt_net))
3283 return PTR_ERR(tgt_net);
3284 }
3285
3286 err = -EINVAL;
3287 ifm = nlmsg_data(nlh);
3288 if (ifm->ifi_index > 0)
3289 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3290 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3291 dev = rtnl_dev_get(tgt_net, tb);
3292 else if (tb[IFLA_GROUP])
3293 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3294 else
3295 goto out;
3296
3297 if (!dev) {
3298 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3299 err = -ENODEV;
3300
3301 goto out;
3302 }
3303
3304 err = rtnl_delete_link(dev, portid, nlh);
3305
3306 out:
3307 if (netnsid >= 0)
3308 put_net(tgt_net);
3309
3310 return err;
3311 }
3312
3313 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3314 u32 portid, const struct nlmsghdr *nlh)
3315 {
3316 unsigned int old_flags;
3317 int err;
3318
3319 old_flags = dev->flags;
3320 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3321 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3322 NULL);
3323 if (err < 0)
3324 return err;
3325 }
3326
3327 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3328 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3329 } else {
3330 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3331 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3332 }
3333 return 0;
3334 }
3335 EXPORT_SYMBOL(rtnl_configure_link);
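
/* On the first call for a freshly created link every flag is reported
 * as changed (~0U) so listeners receive a complete initial picture;
 * subsequent calls only announce the real delta, old_flags ^ dev->flags.
 */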
3336
3337 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3338 unsigned char name_assign_type,
3339 const struct rtnl_link_ops *ops,
3340 struct nlattr *tb[],
3341 struct netlink_ext_ack *extack)
3342 {
3343 struct net_device *dev;
3344 unsigned int num_tx_queues = 1;
3345 unsigned int num_rx_queues = 1;
3346 int err;
3347
3348 if (tb[IFLA_NUM_TX_QUEUES])
3349 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3350 else if (ops->get_num_tx_queues)
3351 num_tx_queues = ops->get_num_tx_queues();
3352
3353 if (tb[IFLA_NUM_RX_QUEUES])
3354 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3355 else if (ops->get_num_rx_queues)
3356 num_rx_queues = ops->get_num_rx_queues();
3357
3358 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3359 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3360 return ERR_PTR(-EINVAL);
3361 }
3362
3363 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3364 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3365 return ERR_PTR(-EINVAL);
3366 }
3367
3368 if (ops->alloc) {
3369 dev = ops->alloc(tb, ifname, name_assign_type,
3370 num_tx_queues, num_rx_queues);
3371 if (IS_ERR(dev))
3372 return dev;
3373 } else {
3374 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3375 name_assign_type, ops->setup,
3376 num_tx_queues, num_rx_queues);
3377 }
3378
3379 if (!dev)
3380 return ERR_PTR(-ENOMEM);
3381
3382 err = validate_linkmsg(dev, tb, extack);
3383 if (err < 0) {
3384 free_netdev(dev);
3385 return ERR_PTR(err);
3386 }
3387
3388 dev_net_set(dev, net);
3389 dev->rtnl_link_ops = ops;
3390 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3391
3392 if (tb[IFLA_MTU]) {
3393 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3394
3395 err = dev_validate_mtu(dev, mtu, extack);
3396 if (err) {
3397 free_netdev(dev);
3398 return ERR_PTR(err);
3399 }
3400 dev->mtu = mtu;
3401 }
3402 if (tb[IFLA_ADDRESS]) {
3403 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3404 nla_len(tb[IFLA_ADDRESS]));
3405 dev->addr_assign_type = NET_ADDR_SET;
3406 }
3407 if (tb[IFLA_BROADCAST])
3408 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3409 nla_len(tb[IFLA_BROADCAST]));
3410 if (tb[IFLA_TXQLEN])
3411 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3412 if (tb[IFLA_OPERSTATE])
3413 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3414 if (tb[IFLA_LINKMODE])
3415 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3416 if (tb[IFLA_GROUP])
3417 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3418 if (tb[IFLA_GSO_MAX_SIZE])
3419 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3420 if (tb[IFLA_GSO_MAX_SEGS])
3421 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3422 if (tb[IFLA_GRO_MAX_SIZE])
3423 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3424 if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3425 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3426 if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3427 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3428
3429 return dev;
3430 }
3431 EXPORT_SYMBOL(rtnl_create_link);
3432
3433 static int rtnl_group_changelink(const struct sk_buff *skb,
3434 struct net *net, int group,
3435 struct ifinfomsg *ifm,
3436 struct netlink_ext_ack *extack,
3437 struct nlattr **tb)
3438 {
3439 struct net_device *dev, *aux;
3440 int err;
3441
3442 for_each_netdev_safe(net, dev, aux) {
3443 if (dev->group == group) {
3444 err = validate_linkmsg(dev, tb, extack);
3445 if (err < 0)
3446 return err;
3447 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3448 if (err < 0)
3449 return err;
3450 }
3451 }
3452
3453 return 0;
3454 }
3455
3456 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3457 const struct rtnl_link_ops *ops,
3458 const struct nlmsghdr *nlh,
3459 struct nlattr **tb, struct nlattr **data,
3460 struct netlink_ext_ack *extack)
3461 {
3462 unsigned char name_assign_type = NET_NAME_USER;
3463 struct net *net = sock_net(skb->sk);
3464 u32 portid = NETLINK_CB(skb).portid;
3465 struct net *dest_net, *link_net;
3466 struct net_device *dev;
3467 char ifname[IFNAMSIZ];
3468 int err;
3469
3470 if (!ops->alloc && !ops->setup)
3471 return -EOPNOTSUPP;
3472
3473 if (tb[IFLA_IFNAME]) {
3474 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3475 } else {
3476 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3477 name_assign_type = NET_NAME_ENUM;
3478 }
3479
3480 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3481 if (IS_ERR(dest_net))
3482 return PTR_ERR(dest_net);
3483
3484 if (tb[IFLA_LINK_NETNSID]) {
3485 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3486
3487 link_net = get_net_ns_by_id(dest_net, id);
3488 if (!link_net) {
3489 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3490 err = -EINVAL;
3491 goto out;
3492 }
3493 err = -EPERM;
3494 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3495 goto out;
3496 } else {
3497 link_net = NULL;
3498 }
3499
3500 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3501 name_assign_type, ops, tb, extack);
3502 if (IS_ERR(dev)) {
3503 err = PTR_ERR(dev);
3504 goto out;
3505 }
3506
3507 dev->ifindex = ifm->ifi_index;
3508
3509 if (ops->newlink)
3510 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3511 else
3512 err = register_netdevice(dev);
3513 if (err < 0) {
3514 free_netdev(dev);
3515 goto out;
3516 }
3517
3518 err = rtnl_configure_link(dev, ifm, portid, nlh);
3519 if (err < 0)
3520 goto out_unregister;
3521 if (link_net) {
3522 err = dev_change_net_namespace(dev, dest_net, ifname);
3523 if (err < 0)
3524 goto out_unregister;
3525 }
3526 if (tb[IFLA_MASTER]) {
3527 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3528 if (err)
3529 goto out_unregister;
3530 }
3531 out:
3532 if (link_net)
3533 put_net(link_net);
3534 put_net(dest_net);
3535 return err;
3536 out_unregister:
3537 if (ops->newlink) {
3538 LIST_HEAD(list_kill);
3539
3540 ops->dellink(dev, &list_kill);
3541 unregister_netdevice_many(&list_kill);
3542 } else {
3543 unregister_netdevice(dev);
3544 }
3545 goto out;
3546 }
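
/* A hedged sketch of the provider side that rtnl_newlink_create() drives
 * (names illustrative; real examples are veth, vlan, dummy):
 *
 *	static struct rtnl_link_ops foo_link_ops = {
 *		.kind      = "foo",
 *		.priv_size = sizeof(struct foo_priv),
 *		.setup     = foo_setup,    // used via alloc_netdev_mqs()
 *		.newlink   = foo_newlink,  // else plain register_netdevice()
 *		.dellink   = foo_dellink,
 *	};
 *
 *	rtnl_link_register(&foo_link_ops);	// from module init
 */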
3547
3548 struct rtnl_newlink_tbs {
3549 struct nlattr *tb[IFLA_MAX + 1];
3550 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3551 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3552 };
3553
3554 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3555 struct rtnl_newlink_tbs *tbs,
3556 struct netlink_ext_ack *extack)
3557 {
3558 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3559 struct nlattr ** const tb = tbs->tb;
3560 const struct rtnl_link_ops *m_ops;
3561 struct net_device *master_dev;
3562 struct net *net = sock_net(skb->sk);
3563 const struct rtnl_link_ops *ops;
3564 struct nlattr **slave_data;
3565 char kind[MODULE_NAME_LEN];
3566 struct net_device *dev;
3567 struct ifinfomsg *ifm;
3568 struct nlattr **data;
3569 bool link_specified;
3570 int err;
3571
3572 #ifdef CONFIG_MODULES
3573 replay:
3574 #endif
3575 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3576 ifla_policy, extack);
3577 if (err < 0)
3578 return err;
3579
3580 err = rtnl_ensure_unique_netns(tb, extack, false);
3581 if (err < 0)
3582 return err;
3583
3584 ifm = nlmsg_data(nlh);
3585 if (ifm->ifi_index > 0) {
3586 link_specified = true;
3587 dev = __dev_get_by_index(net, ifm->ifi_index);
3588 } else if (ifm->ifi_index < 0) {
3589 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3590 return -EINVAL;
3591 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3592 link_specified = true;
3593 dev = rtnl_dev_get(net, tb);
3594 } else {
3595 link_specified = false;
3596 dev = NULL;
3597 }
3598
3599 master_dev = NULL;
3600 m_ops = NULL;
3601 if (dev) {
3602 master_dev = netdev_master_upper_dev_get(dev);
3603 if (master_dev)
3604 m_ops = master_dev->rtnl_link_ops;
3605 }
3606
3607 if (tb[IFLA_LINKINFO]) {
3608 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3609 tb[IFLA_LINKINFO],
3610 ifla_info_policy, NULL);
3611 if (err < 0)
3612 return err;
3613 } else
3614 memset(linkinfo, 0, sizeof(linkinfo));
3615
3616 if (linkinfo[IFLA_INFO_KIND]) {
3617 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3618 ops = rtnl_link_ops_get(kind);
3619 } else {
3620 kind[0] = '\0';
3621 ops = NULL;
3622 }
3623
3624 data = NULL;
3625 if (ops) {
3626 if (ops->maxtype > RTNL_MAX_TYPE)
3627 return -EINVAL;
3628
3629 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3630 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3631 linkinfo[IFLA_INFO_DATA],
3632 ops->policy, extack);
3633 if (err < 0)
3634 return err;
3635 data = tbs->attr;
3636 }
3637 if (ops->validate) {
3638 err = ops->validate(tb, data, extack);
3639 if (err < 0)
3640 return err;
3641 }
3642 }
3643
3644 slave_data = NULL;
3645 if (m_ops) {
3646 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3647 return -EINVAL;
3648
3649 if (m_ops->slave_maxtype &&
3650 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3651 err = nla_parse_nested_deprecated(tbs->slave_attr,
3652 m_ops->slave_maxtype,
3653 linkinfo[IFLA_INFO_SLAVE_DATA],
3654 m_ops->slave_policy,
3655 extack);
3656 if (err < 0)
3657 return err;
3658 slave_data = tbs->slave_attr;
3659 }
3660 }
3661
3662 if (dev) {
3663 int status = 0;
3664
3665 if (nlh->nlmsg_flags & NLM_F_EXCL)
3666 return -EEXIST;
3667 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3668 return -EOPNOTSUPP;
3669
3670 err = validate_linkmsg(dev, tb, extack);
3671 if (err < 0)
3672 return err;
3673
3674 if (linkinfo[IFLA_INFO_DATA]) {
3675 if (!ops || ops != dev->rtnl_link_ops ||
3676 !ops->changelink)
3677 return -EOPNOTSUPP;
3678
3679 err = ops->changelink(dev, tb, data, extack);
3680 if (err < 0)
3681 return err;
3682 status |= DO_SETLINK_NOTIFY;
3683 }
3684
3685 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3686 if (!m_ops || !m_ops->slave_changelink)
3687 return -EOPNOTSUPP;
3688
3689 err = m_ops->slave_changelink(master_dev, dev, tb,
3690 slave_data, extack);
3691 if (err < 0)
3692 return err;
3693 status |= DO_SETLINK_NOTIFY;
3694 }
3695
3696 return do_setlink(skb, dev, ifm, extack, tb, status);
3697 }
3698
3699 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3700 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3701 * or it's for a group
3702 */
3703 if (link_specified)
3704 return -ENODEV;
3705 if (tb[IFLA_GROUP])
3706 return rtnl_group_changelink(skb, net,
3707 nla_get_u32(tb[IFLA_GROUP]),
3708 ifm, extack, tb);
3709 return -ENODEV;
3710 }
3711
3712 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3713 return -EOPNOTSUPP;
3714
3715 if (!ops) {
3716 #ifdef CONFIG_MODULES
3717 if (kind[0]) {
3718 __rtnl_unlock();
3719 request_module("rtnl-link-%s", kind);
3720 rtnl_lock();
3721 ops = rtnl_link_ops_get(kind);
3722 if (ops)
3723 goto replay;
3724 }
3725 #endif
3726 NL_SET_ERR_MSG(extack, "Unknown device type");
3727 return -EOPNOTSUPP;
3728 }
3729
3730 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
3731 }
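
/* The replay above gives modules a chance to autoload: when no ops are
 * registered for the requested kind, the RTNL lock is dropped and
 * modprobe is asked for "rtnl-link-<kind>".  Drivers opt in with
 *	MODULE_ALIAS_RTNL_LINK("foo");
 * which is what lets `ip link add ... type foo` pull in the module
 * before the request is re-parsed.
 */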
3732
3733 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3734 struct netlink_ext_ack *extack)
3735 {
3736 struct rtnl_newlink_tbs *tbs;
3737 int ret;
3738
3739 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3740 if (!tbs)
3741 return -ENOMEM;
3742
3743 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3744 kfree(tbs);
3745 return ret;
3746 }
3747
3748 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3749 const struct nlmsghdr *nlh,
3750 struct nlattr **tb,
3751 struct netlink_ext_ack *extack)
3752 {
3753 struct ifinfomsg *ifm;
3754 int i, err;
3755
3756 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3757 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3758 return -EINVAL;
3759 }
3760
3761 if (!netlink_strict_get_check(skb))
3762 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3763 ifla_policy, extack);
3764
3765 ifm = nlmsg_data(nlh);
3766 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3767 ifm->ifi_change) {
3768 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3769 return -EINVAL;
3770 }
3771
3772 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3773 ifla_policy, extack);
3774 if (err)
3775 return err;
3776
3777 for (i = 0; i <= IFLA_MAX; i++) {
3778 if (!tb[i])
3779 continue;
3780
3781 switch (i) {
3782 case IFLA_IFNAME:
3783 case IFLA_ALT_IFNAME:
3784 case IFLA_EXT_MASK:
3785 case IFLA_TARGET_NETNSID:
3786 break;
3787 default:
3788 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3789 return -EINVAL;
3790 }
3791 }
3792
3793 return 0;
3794 }
3795
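/* RTM_GETLINK doit handler: look up a single device (by ifindex, name or
 * alternative name, optionally in another netns selected via
 * IFLA_TARGET_NETNSID), build one RTM_NEWLINK message for it and unicast
 * the reply back to the requester.
 */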
3796 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3797 struct netlink_ext_ack *extack)
3798 {
3799 struct net *net = sock_net(skb->sk);
3800 struct net *tgt_net = net;
3801 struct ifinfomsg *ifm;
3802 struct nlattr *tb[IFLA_MAX+1];
3803 struct net_device *dev = NULL;
3804 struct sk_buff *nskb;
3805 int netnsid = -1;
3806 int err;
3807 u32 ext_filter_mask = 0;
3808
3809 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3810 if (err < 0)
3811 return err;
3812
3813 err = rtnl_ensure_unique_netns(tb, extack, true);
3814 if (err < 0)
3815 return err;
3816
3817 if (tb[IFLA_TARGET_NETNSID]) {
3818 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3819 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3820 if (IS_ERR(tgt_net))
3821 return PTR_ERR(tgt_net);
3822 }
3823
3824 if (tb[IFLA_EXT_MASK])
3825 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3826
3827 err = -EINVAL;
3828 ifm = nlmsg_data(nlh);
3829 if (ifm->ifi_index > 0)
3830 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3831 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3832 dev = rtnl_dev_get(tgt_net, tb);
3833 else
3834 goto out;
3835
3836 err = -ENODEV;
3837 if (dev == NULL)
3838 goto out;
3839
3840 err = -ENOBUFS;
3841 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
3842 if (nskb == NULL)
3843 goto out;
3844
3845 /* Synchronize the carrier state so we don't report a state
3846 * that we're not actually going to honour immediately; if
3847 * the driver just did a carrier off->on transition, we can
3848 * only TX if link watch work has run, but without this we'd
3849 * already report carrier on, even if it doesn't work yet.
3850 */
3851 linkwatch_sync_dev(dev);
3852
3853 err = rtnl_fill_ifinfo(nskb, dev, net,
3854 RTM_NEWLINK, NETLINK_CB(skb).portid,
3855 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3856 0, NULL, 0, netnsid, GFP_KERNEL);
3857 if (err < 0) {
3858 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3859 WARN_ON(err == -EMSGSIZE);
3860 kfree_skb(nskb);
3861 } else
3862 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3863 out:
3864 if (netnsid >= 0)
3865 put_net(tgt_net);
3866
3867 return err;
3868 }
3869
3870 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3871 bool *changed, struct netlink_ext_ack *extack)
3872 {
3873 char *alt_ifname;
3874 size_t size;
3875 int err;
3876
3877 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3878 if (err)
3879 return err;
3880
3881 if (cmd == RTM_NEWLINKPROP) {
3882 size = rtnl_prop_list_size(dev);
3883 size += nla_total_size(ALTIFNAMSIZ);
3884 if (size >= U16_MAX) {
3885 NL_SET_ERR_MSG(extack,
3886 "effective property list too long");
3887 return -EINVAL;
3888 }
3889 }
3890
3891 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3892 if (!alt_ifname)
3893 return -ENOMEM;
3894
3895 if (cmd == RTM_NEWLINKPROP) {
3896 err = netdev_name_node_alt_create(dev, alt_ifname);
3897 if (!err)
3898 alt_ifname = NULL;
3899 } else if (cmd == RTM_DELLINKPROP) {
3900 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3901 } else {
3902 WARN_ON_ONCE(1);
3903 err = -EINVAL;
3904 }
3905
3906 kfree(alt_ifname);
3907 if (!err)
3908 *changed = true;
3909 return err;
3910 }
3911
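/* Common handler for RTM_NEWLINKPROP and RTM_DELLINKPROP: walk the nested
 * IFLA_PROP_LIST and add or remove alternative interface names for the
 * device. From userspace this is what, for example,
 *	ip link property add dev eth0 altname my-uplink
 * ends up invoking (illustrative command; eth0/my-uplink are placeholders).
 */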
3912 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3913 struct netlink_ext_ack *extack)
3914 {
3915 struct net *net = sock_net(skb->sk);
3916 struct nlattr *tb[IFLA_MAX + 1];
3917 struct net_device *dev;
3918 struct ifinfomsg *ifm;
3919 bool changed = false;
3920 struct nlattr *attr;
3921 int err, rem;
3922
3923 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3924 if (err)
3925 return err;
3926
3927 err = rtnl_ensure_unique_netns(tb, extack, true);
3928 if (err)
3929 return err;
3930
3931 ifm = nlmsg_data(nlh);
3932 if (ifm->ifi_index > 0)
3933 dev = __dev_get_by_index(net, ifm->ifi_index);
3934 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3935 dev = rtnl_dev_get(net, tb);
3936 else
3937 return -EINVAL;
3938
3939 if (!dev)
3940 return -ENODEV;
3941
3942 if (!tb[IFLA_PROP_LIST])
3943 return 0;
3944
3945 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3946 switch (nla_type(attr)) {
3947 case IFLA_ALT_IFNAME:
3948 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3949 if (err)
3950 return err;
3951 break;
3952 }
3953 }
3954
3955 if (changed)
3956 netdev_state_change(dev);
3957 return 0;
3958 }
3959
3960 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3961 struct netlink_ext_ack *extack)
3962 {
3963 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3964 }
3965
3966 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3967 struct netlink_ext_ack *extack)
3968 {
3969 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3970 }
3971
3972 static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
3973 struct nlmsghdr *nlh)
3974 {
3975 struct net *net = sock_net(skb->sk);
3976 size_t min_ifinfo_dump_size = 0;
3977 u32 ext_filter_mask = 0;
3978 struct net_device *dev;
3979 struct nlattr *nla;
3980 int hdrlen, rem;
3981
3982 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3983 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3984 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3985
3986 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
3987 return NLMSG_GOODSIZE;
3988
3989 nla_for_each_attr_type(nla, IFLA_EXT_MASK,
3990 nlmsg_attrdata(nlh, hdrlen),
3991 nlmsg_attrlen(nlh, hdrlen), rem) {
3992 if (nla_len(nla) == sizeof(u32))
3993 ext_filter_mask = nla_get_u32(nla);
3994 }
3995
3996 if (!ext_filter_mask)
3997 return NLMSG_GOODSIZE;
3998 /*
3999 * traverse the list of net devices and compute the minimum
4000 * buffer size based upon the filter mask.
4001 */
4002 rcu_read_lock();
4003 for_each_netdev_rcu(net, dev) {
4004 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
4005 if_nlmsg_size(dev, ext_filter_mask));
4006 }
4007 rcu_read_unlock();
4008
4009 return nlmsg_total_size(min_ifinfo_dump_size);
4010 }
4011
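/* Fallback dump handler for PF_UNSPEC dump requests: iterate over all
 * registered address families and chain their dumpit callbacks, using
 * cb->family as the resume cursor across dump invocations.
 */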
4012 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
4013 {
4014 int idx;
4015 int s_idx = cb->family;
4016 int type = cb->nlh->nlmsg_type - RTM_BASE;
4017 int ret = 0;
4018
4019 if (s_idx == 0)
4020 s_idx = 1;
4021
4022 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
4023 struct rtnl_link __rcu **tab;
4024 struct rtnl_link *link;
4025 rtnl_dumpit_func dumpit;
4026
4027 if (idx < s_idx || idx == PF_PACKET)
4028 continue;
4029
4030 if (type < 0 || type >= RTM_NR_MSGTYPES)
4031 continue;
4032
4033 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
4034 if (!tab)
4035 continue;
4036
4037 link = rcu_dereference_rtnl(tab[type]);
4038 if (!link)
4039 continue;
4040
4041 dumpit = link->dumpit;
4042 if (!dumpit)
4043 continue;
4044
4045 if (idx > s_idx) {
4046 memset(&cb->args[0], 0, sizeof(cb->args));
4047 cb->prev_seq = 0;
4048 cb->seq = 0;
4049 }
4050 ret = dumpit(skb, cb);
4051 if (ret)
4052 break;
4053 }
4054 cb->family = idx;
4055
4056 return skb->len ? : ret;
4057 }
4058
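/* Build (but do not send) an RTM_NEWLINK/RTM_DELLINK notification skb for
 * @dev. On allocation or fill failure the RTNLGRP_LINK socket error state
 * is set and NULL is returned.
 */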
4059 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
4060 unsigned int change,
4061 u32 event, gfp_t flags, int *new_nsid,
4062 int new_ifindex, u32 portid,
4063 const struct nlmsghdr *nlh)
4064 {
4065 struct net *net = dev_net(dev);
4066 struct sk_buff *skb;
4067 int err = -ENOBUFS;
4068 u32 seq = 0;
4069
4070 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
4071 if (skb == NULL)
4072 goto errout;
4073
4074 if (nlmsg_report(nlh))
4075 seq = nlmsg_seq(nlh);
4076 else
4077 portid = 0;
4078
4079 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
4080 type, portid, seq, change, 0, 0, event,
4081 new_nsid, new_ifindex, -1, flags);
4082 if (err < 0) {
4083 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
4084 WARN_ON(err == -EMSGSIZE);
4085 kfree_skb(skb);
4086 goto errout;
4087 }
4088 return skb;
4089 errout:
4090 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4091 return NULL;
4092 }
4093
4094 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4095 u32 portid, const struct nlmsghdr *nlh)
4096 {
4097 struct net *net = dev_net(dev);
4098
4099 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4100 }
4101
4102 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4103 unsigned int change, u32 event,
4104 gfp_t flags, int *new_nsid, int new_ifindex,
4105 u32 portid, const struct nlmsghdr *nlh)
4106 {
4107 struct sk_buff *skb;
4108
4109 if (dev->reg_state != NETREG_REGISTERED)
4110 return;
4111
4112 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4113 new_ifindex, portid, nlh);
4114 if (skb)
4115 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4116 }
4117
4118 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4119 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4120 {
4121 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4122 NULL, 0, portid, nlh);
4123 }
4124
4125 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4126 gfp_t flags, int *new_nsid, int new_ifindex)
4127 {
4128 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4129 new_nsid, new_ifindex, 0, NULL);
4130 }
4131
4132 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4133 struct net_device *dev,
4134 u8 *addr, u16 vid, u32 pid, u32 seq,
4135 int type, unsigned int flags,
4136 int nlflags, u16 ndm_state)
4137 {
4138 struct nlmsghdr *nlh;
4139 struct ndmsg *ndm;
4140
4141 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4142 if (!nlh)
4143 return -EMSGSIZE;
4144
4145 ndm = nlmsg_data(nlh);
4146 ndm->ndm_family = AF_BRIDGE;
4147 ndm->ndm_pad1 = 0;
4148 ndm->ndm_pad2 = 0;
4149 ndm->ndm_flags = flags;
4150 ndm->ndm_type = 0;
4151 ndm->ndm_ifindex = dev->ifindex;
4152 ndm->ndm_state = ndm_state;
4153
4154 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4155 goto nla_put_failure;
4156 if (vid)
4157 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4158 goto nla_put_failure;
4159
4160 nlmsg_end(skb, nlh);
4161 return 0;
4162
4163 nla_put_failure:
4164 nlmsg_cancel(skb, nlh);
4165 return -EMSGSIZE;
4166 }
4167
4168 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4169 {
4170 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4171 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4172 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4173 0;
4174 }
4175
4176 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4177 u16 ndm_state)
4178 {
4179 struct net *net = dev_net(dev);
4180 struct sk_buff *skb;
4181 int err = -ENOBUFS;
4182
4183 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4184 if (!skb)
4185 goto errout;
4186
4187 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4188 0, 0, type, NTF_SELF, 0, ndm_state);
4189 if (err < 0) {
4190 kfree_skb(skb);
4191 goto errout;
4192 }
4193
4194 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4195 return;
4196 errout:
4197 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4198 }
4199
4200 /*
4201 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4202 */
4203 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4204 struct nlattr *tb[],
4205 struct net_device *dev,
4206 const unsigned char *addr, u16 vid,
4207 u16 flags)
4208 {
4209 int err = -EINVAL;
4210
4211 /* If the device supports aging addresses, it needs to
4212 * implement its own handler for this.
4213 */
4214 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4215 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4216 return err;
4217 }
4218
4219 if (tb[NDA_FLAGS_EXT]) {
4220 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4221 return err;
4222 }
4223
4224 if (vid) {
4225 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4226 return err;
4227 }
4228
4229 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4230 err = dev_uc_add_excl(dev, addr);
4231 else if (is_multicast_ether_addr(addr))
4232 err = dev_mc_add_excl(dev, addr);
4233
4234 /* Only return duplicate errors if NLM_F_EXCL is set */
4235 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4236 err = 0;
4237
4238 return err;
4239 }
4240 EXPORT_SYMBOL(ndo_dflt_fdb_add);
4241
4242 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4243 struct netlink_ext_ack *extack)
4244 {
4245 u16 vid = 0;
4246
4247 if (vlan_attr) {
4248 if (nla_len(vlan_attr) != sizeof(u16)) {
4249 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4250 return -EINVAL;
4251 }
4252
4253 vid = nla_get_u16(vlan_attr);
4254
4255 if (!vid || vid >= VLAN_VID_MASK) {
4256 NL_SET_ERR_MSG(extack, "invalid vlan id");
4257 return -EINVAL;
4258 }
4259 }
4260 *p_vid = vid;
4261 return 0;
4262 }
4263
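/* RTM_NEWNEIGH handler for AF_BRIDGE FDB entries. The request may be
 * dispatched twice: once to the bridge master (NTF_MASTER, the default when
 * no flags are given) and once to the port device itself (NTF_SELF), with
 * ndo_dflt_fdb_add() as the software fallback for the latter.
 */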
4264 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4265 struct netlink_ext_ack *extack)
4266 {
4267 struct net *net = sock_net(skb->sk);
4268 struct ndmsg *ndm;
4269 struct nlattr *tb[NDA_MAX+1];
4270 struct net_device *dev;
4271 u8 *addr;
4272 u16 vid;
4273 int err;
4274
4275 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4276 extack);
4277 if (err < 0)
4278 return err;
4279
4280 ndm = nlmsg_data(nlh);
4281 if (ndm->ndm_ifindex == 0) {
4282 NL_SET_ERR_MSG(extack, "invalid ifindex");
4283 return -EINVAL;
4284 }
4285
4286 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4287 if (dev == NULL) {
4288 NL_SET_ERR_MSG(extack, "unknown ifindex");
4289 return -ENODEV;
4290 }
4291
4292 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4293 NL_SET_ERR_MSG(extack, "invalid address");
4294 return -EINVAL;
4295 }
4296
4297 if (dev->type != ARPHRD_ETHER) {
4298 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4299 return -EINVAL;
4300 }
4301
4302 addr = nla_data(tb[NDA_LLADDR]);
4303
4304 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4305 if (err)
4306 return err;
4307
4308 err = -EOPNOTSUPP;
4309
4310 /* Support fdb on the master device; the net/bridge default case */
4311 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4312 netif_is_bridge_port(dev)) {
4313 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4314 const struct net_device_ops *ops = br_dev->netdev_ops;
4315
4316 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4317 nlh->nlmsg_flags, extack);
4318 if (err)
4319 goto out;
4320 else
4321 ndm->ndm_flags &= ~NTF_MASTER;
4322 }
4323
4324 /* Embedded bridge, macvlan, and any other device support */
4325 if ((ndm->ndm_flags & NTF_SELF)) {
4326 if (dev->netdev_ops->ndo_fdb_add)
4327 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4328 vid,
4329 nlh->nlmsg_flags,
4330 extack);
4331 else
4332 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4333 nlh->nlmsg_flags);
4334
4335 if (!err) {
4336 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4337 ndm->ndm_state);
4338 ndm->ndm_flags &= ~NTF_SELF;
4339 }
4340 }
4341 out:
4342 return err;
4343 }
4344
4345 /*
4346 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4347 */
4348 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4349 struct nlattr *tb[],
4350 struct net_device *dev,
4351 const unsigned char *addr, u16 vid)
4352 {
4353 int err = -EINVAL;
4354
4355 /* If the device supports aging addresses, it needs to
4356 * implement its own handler for this.
4357 */
4358 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4359 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4360 return err;
4361 }
4362
4363 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4364 err = dev_uc_del(dev, addr);
4365 else if (is_multicast_ether_addr(addr))
4366 err = dev_mc_del(dev, addr);
4367
4368 return err;
4369 }
4370 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4371
4372 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4373 struct netlink_ext_ack *extack)
4374 {
4375 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4376 struct net *net = sock_net(skb->sk);
4377 const struct net_device_ops *ops;
4378 struct ndmsg *ndm;
4379 struct nlattr *tb[NDA_MAX+1];
4380 struct net_device *dev;
4381 __u8 *addr = NULL;
4382 int err;
4383 u16 vid;
4384
4385 if (!netlink_capable(skb, CAP_NET_ADMIN))
4386 return -EPERM;
4387
4388 if (!del_bulk) {
4389 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4390 NULL, extack);
4391 } else {
4392 /* For bulk delete, the drivers will parse the message with
4393 * their own policy.
4394 */
4395 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
4396 }
4397 if (err < 0)
4398 return err;
4399
4400 ndm = nlmsg_data(nlh);
4401 if (ndm->ndm_ifindex == 0) {
4402 NL_SET_ERR_MSG(extack, "invalid ifindex");
4403 return -EINVAL;
4404 }
4405
4406 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4407 if (dev == NULL) {
4408 NL_SET_ERR_MSG(extack, "unknown ifindex");
4409 return -ENODEV;
4410 }
4411
4412 if (!del_bulk) {
4413 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4414 NL_SET_ERR_MSG(extack, "invalid address");
4415 return -EINVAL;
4416 }
4417 addr = nla_data(tb[NDA_LLADDR]);
4418
4419 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4420 if (err)
4421 return err;
4422 }
4423
4424 if (dev->type != ARPHRD_ETHER) {
4425 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4426 return -EINVAL;
4427 }
4428
4429 err = -EOPNOTSUPP;
4430
4431 /* Support fdb on the master device; the net/bridge default case */
4432 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4433 netif_is_bridge_port(dev)) {
4434 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4435
4436 ops = br_dev->netdev_ops;
4437 if (!del_bulk) {
4438 if (ops->ndo_fdb_del)
4439 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4440 } else {
4441 if (ops->ndo_fdb_del_bulk)
4442 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4443 }
4444
4445 if (err)
4446 goto out;
4447 else
4448 ndm->ndm_flags &= ~NTF_MASTER;
4449 }
4450
4451 /* Embedded bridge, macvlan, and any other device support */
4452 if (ndm->ndm_flags & NTF_SELF) {
4453 ops = dev->netdev_ops;
4454 if (!del_bulk) {
4455 if (ops->ndo_fdb_del)
4456 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4457 else
4458 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4459 } else {
4460 /* in case err was cleared by NTF_MASTER call */
4461 err = -EOPNOTSUPP;
4462 if (ops->ndo_fdb_del_bulk)
4463 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4464 }
4465
4466 if (!err) {
4467 if (!del_bulk)
4468 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4469 ndm->ndm_state);
4470 ndm->ndm_flags &= ~NTF_SELF;
4471 }
4472 }
4473 out:
4474 return err;
4475 }
4476
4477 static int nlmsg_populate_fdb(struct sk_buff *skb,
4478 struct netlink_callback *cb,
4479 struct net_device *dev,
4480 int *idx,
4481 struct netdev_hw_addr_list *list)
4482 {
4483 struct netdev_hw_addr *ha;
4484 int err;
4485 u32 portid, seq;
4486
4487 portid = NETLINK_CB(cb->skb).portid;
4488 seq = cb->nlh->nlmsg_seq;
4489
4490 list_for_each_entry(ha, &list->list, list) {
4491 if (*idx < cb->args[2])
4492 goto skip;
4493
4494 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4495 portid, seq,
4496 RTM_NEWNEIGH, NTF_SELF,
4497 NLM_F_MULTI, NUD_PERMANENT);
4498 if (err < 0)
4499 return err;
4500 skip:
4501 *idx += 1;
4502 }
4503 return 0;
4504 }
4505
4506 /**
4507 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4508 * @skb: socket buffer to store message in
4509 * @cb: netlink callback
4510 * @dev: netdevice
4511 * @filter_dev: ignored
4512 * @idx: the number of FDB table entries dumped is added to *@idx
4513 *
4514 * Default netdevice operation to dump the existing unicast address list.
4515 * Returns number of addresses from list put in skb.
4516 */
4517 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4518 struct netlink_callback *cb,
4519 struct net_device *dev,
4520 struct net_device *filter_dev,
4521 int *idx)
4522 {
4523 int err;
4524
4525 if (dev->type != ARPHRD_ETHER)
4526 return -EINVAL;
4527
4528 netif_addr_lock_bh(dev);
4529 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4530 if (err)
4531 goto out;
4532 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4533 out:
4534 netif_addr_unlock_bh(dev);
4535 return err;
4536 }
4537 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4538
4539 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4540 int *br_idx, int *brport_idx,
4541 struct netlink_ext_ack *extack)
4542 {
4543 struct nlattr *tb[NDA_MAX + 1];
4544 struct ndmsg *ndm;
4545 int err, i;
4546
4547 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4548 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4549 return -EINVAL;
4550 }
4551
4552 ndm = nlmsg_data(nlh);
4553 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4554 ndm->ndm_flags || ndm->ndm_type) {
4555 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4556 return -EINVAL;
4557 }
4558
4559 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4560 NDA_MAX, NULL, extack);
4561 if (err < 0)
4562 return err;
4563
4564 *brport_idx = ndm->ndm_ifindex;
4565 for (i = 0; i <= NDA_MAX; ++i) {
4566 if (!tb[i])
4567 continue;
4568
4569 switch (i) {
4570 case NDA_IFINDEX:
4571 if (nla_len(tb[i]) != sizeof(u32)) {
4572 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4573 return -EINVAL;
4574 }
4575 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4576 break;
4577 case NDA_MASTER:
4578 if (nla_len(tb[i]) != sizeof(u32)) {
4579 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4580 return -EINVAL;
4581 }
4582 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4583 break;
4584 default:
4585 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4586 return -EINVAL;
4587 }
4588 }
4589
4590 return 0;
4591 }
4592
4593 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4594 int *br_idx, int *brport_idx,
4595 struct netlink_ext_ack *extack)
4596 {
4597 struct nlattr *tb[IFLA_MAX+1];
4598 int err;
4599
4600 /* A hack to preserve kernel<->userspace interface.
4601 * Before Linux v4.12 this code accepted ndmsg, as sent by iproute2 since v3.3.0.
4602 * However, ndmsg is shorter than ifinfomsg, so nlmsg_parse() bails on it.
4603 * Hence, check for ndmsg with an optional u32 attribute (not used here).
4604 * Fortunately these sizes don't conflict with the size of ifinfomsg
4605 * with an optional attribute.
4606 */
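/* For reference (fixed-width uapi types, identical on all arches):
 *	sizeof(struct ndmsg) = 12, + nla_attr_size(sizeof(u32)) = 20
 *	sizeof(struct ifinfomsg) = 16, + nla_attr_size(sizeof(u32)) = 24
 * so the two accepted ndmsg lengths (12, 20) match neither 16 nor 24.
 */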
4607 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4608 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4609 nla_attr_size(sizeof(u32)))) {
4610 struct ifinfomsg *ifm;
4611
4612 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4613 tb, IFLA_MAX, ifla_policy,
4614 extack);
4615 if (err < 0) {
4616 return -EINVAL;
4617 } else if (err == 0) {
4618 if (tb[IFLA_MASTER])
4619 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4620 }
4621
4622 ifm = nlmsg_data(nlh);
4623 *brport_idx = ifm->ifi_index;
4624 }
4625 return 0;
4626 }
4627
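/* RTM_GETNEIGH dump handler for AF_BRIDGE: walk every device hash bucket
 * and dump FDB entries via ndo_fdb_dump(). cb->args[0]/[1] record the hash
 * bucket and device index to resume from; cb->args[2] is the per-device
 * FDB offset already dumped.
 */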
4628 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4629 {
4630 struct net_device *dev;
4631 struct net_device *br_dev = NULL;
4632 const struct net_device_ops *ops = NULL;
4633 const struct net_device_ops *cops = NULL;
4634 struct net *net = sock_net(skb->sk);
4635 struct hlist_head *head;
4636 int brport_idx = 0;
4637 int br_idx = 0;
4638 int h, s_h;
4639 int idx = 0, s_idx;
4640 int err = 0;
4641 int fidx = 0;
4642
4643 if (cb->strict_check)
4644 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4645 cb->extack);
4646 else
4647 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4648 cb->extack);
4649 if (err < 0)
4650 return err;
4651
4652 if (br_idx) {
4653 br_dev = __dev_get_by_index(net, br_idx);
4654 if (!br_dev)
4655 return -ENODEV;
4656
4657 ops = br_dev->netdev_ops;
4658 }
4659
4660 s_h = cb->args[0];
4661 s_idx = cb->args[1];
4662
4663 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4664 idx = 0;
4665 head = &net->dev_index_head[h];
4666 hlist_for_each_entry(dev, head, index_hlist) {
4667
4668 if (brport_idx && (dev->ifindex != brport_idx))
4669 continue;
4670
4671 if (!br_idx) { /* user did not specify a specific bridge */
4672 if (netif_is_bridge_port(dev)) {
4673 br_dev = netdev_master_upper_dev_get(dev);
4674 cops = br_dev->netdev_ops;
4675 }
4676 } else {
4677 if (dev != br_dev &&
4678 !netif_is_bridge_port(dev))
4679 continue;
4680
4681 if (br_dev != netdev_master_upper_dev_get(dev) &&
4682 !netif_is_bridge_master(dev))
4683 continue;
4684 cops = ops;
4685 }
4686
4687 if (idx < s_idx)
4688 goto cont;
4689
4690 if (netif_is_bridge_port(dev)) {
4691 if (cops && cops->ndo_fdb_dump) {
4692 err = cops->ndo_fdb_dump(skb, cb,
4693 br_dev, dev,
4694 &fidx);
4695 if (err == -EMSGSIZE)
4696 goto out;
4697 }
4698 }
4699
4700 if (dev->netdev_ops->ndo_fdb_dump)
4701 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4702 dev, NULL,
4703 &fidx);
4704 else
4705 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4706 &fidx);
4707 if (err == -EMSGSIZE)
4708 goto out;
4709
4710 cops = NULL;
4711
4712 /* reset fdb offset to 0 for rest of the interfaces */
4713 cb->args[2] = 0;
4714 fidx = 0;
4715 cont:
4716 idx++;
4717 }
4718 }
4719
4720 out:
4721 cb->args[0] = h;
4722 cb->args[1] = idx;
4723 cb->args[2] = fidx;
4724
4725 return skb->len;
4726 }
4727
4728 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4729 struct nlattr **tb, u8 *ndm_flags,
4730 int *br_idx, int *brport_idx, u8 **addr,
4731 u16 *vid, struct netlink_ext_ack *extack)
4732 {
4733 struct ndmsg *ndm;
4734 int err, i;
4735
4736 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4737 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4738 return -EINVAL;
4739 }
4740
4741 ndm = nlmsg_data(nlh);
4742 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4743 ndm->ndm_type) {
4744 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4745 return -EINVAL;
4746 }
4747
4748 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4749 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4750 return -EINVAL;
4751 }
4752
4753 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4754 NDA_MAX, nda_policy, extack);
4755 if (err < 0)
4756 return err;
4757
4758 *ndm_flags = ndm->ndm_flags;
4759 *brport_idx = ndm->ndm_ifindex;
4760 for (i = 0; i <= NDA_MAX; ++i) {
4761 if (!tb[i])
4762 continue;
4763
4764 switch (i) {
4765 case NDA_MASTER:
4766 *br_idx = nla_get_u32(tb[i]);
4767 break;
4768 case NDA_LLADDR:
4769 if (nla_len(tb[i]) != ETH_ALEN) {
4770 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4771 return -EINVAL;
4772 }
4773 *addr = nla_data(tb[i]);
4774 break;
4775 case NDA_VLAN:
4776 err = fdb_vid_parse(tb[i], vid, extack);
4777 if (err)
4778 return err;
4779 break;
4780 case NDA_VNI:
4781 break;
4782 default:
4783 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4784 return -EINVAL;
4785 }
4786 }
4787
4788 return 0;
4789 }
4790
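/* RTM_GETNEIGH doit handler: strict-validated lookup of a single FDB entry
 * by address (and optional VLAN), dispatched to either the bridge master's
 * or the device's own ndo_fdb_get() implementation.
 */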
4791 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4792 struct netlink_ext_ack *extack)
4793 {
4794 struct net_device *dev = NULL, *br_dev = NULL;
4795 const struct net_device_ops *ops = NULL;
4796 struct net *net = sock_net(in_skb->sk);
4797 struct nlattr *tb[NDA_MAX + 1];
4798 struct sk_buff *skb;
4799 int brport_idx = 0;
4800 u8 ndm_flags = 0;
4801 int br_idx = 0;
4802 u8 *addr = NULL;
4803 u16 vid = 0;
4804 int err;
4805
4806 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4807 &brport_idx, &addr, &vid, extack);
4808 if (err < 0)
4809 return err;
4810
4811 if (!addr) {
4812 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4813 return -EINVAL;
4814 }
4815
4816 if (brport_idx) {
4817 dev = __dev_get_by_index(net, brport_idx);
4818 if (!dev) {
4819 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4820 return -ENODEV;
4821 }
4822 }
4823
4824 if (br_idx) {
4825 if (dev) {
4826 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4827 return -EINVAL;
4828 }
4829
4830 br_dev = __dev_get_by_index(net, br_idx);
4831 if (!br_dev) {
4832 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4833 return -EINVAL;
4834 }
4835 ops = br_dev->netdev_ops;
4836 }
4837
4838 if (dev) {
4839 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4840 if (!netif_is_bridge_port(dev)) {
4841 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4842 return -EINVAL;
4843 }
4844 br_dev = netdev_master_upper_dev_get(dev);
4845 if (!br_dev) {
4846 NL_SET_ERR_MSG(extack, "Master of device not found");
4847 return -EINVAL;
4848 }
4849 ops = br_dev->netdev_ops;
4850 } else {
4851 if (!(ndm_flags & NTF_SELF)) {
4852 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4853 return -EINVAL;
4854 }
4855 ops = dev->netdev_ops;
4856 }
4857 }
4858
4859 if (!br_dev && !dev) {
4860 NL_SET_ERR_MSG(extack, "No device specified");
4861 return -ENODEV;
4862 }
4863
4864 if (!ops || !ops->ndo_fdb_get) {
4865 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4866 return -EOPNOTSUPP;
4867 }
4868
4869 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4870 if (!skb)
4871 return -ENOBUFS;
4872
4873 if (br_dev)
4874 dev = br_dev;
4875 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4876 NETLINK_CB(in_skb).portid,
4877 nlh->nlmsg_seq, extack);
4878 if (err)
4879 goto out;
4880
4881 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4882 out:
4883 kfree_skb(skb);
4884 return err;
4885 }
4886
4887 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4888 unsigned int attrnum, unsigned int flag)
4889 {
4890 if (mask & flag)
4891 return nla_put_u8(skb, attrnum, !!(flags & flag));
4892 return 0;
4893 }
4894
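/* Default ndo_bridge_getlink() implementation: emit an AF_BRIDGE
 * RTM_NEWLINK message carrying an IFLA_AF_SPEC nest (bridge flags/mode and
 * optional VLAN info via @vlan_fill) plus an IFLA_PROTINFO nest with the
 * bridge port flags selected by @mask.
 */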
4895 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4896 struct net_device *dev, u16 mode,
4897 u32 flags, u32 mask, int nlflags,
4898 u32 filter_mask,
4899 int (*vlan_fill)(struct sk_buff *skb,
4900 struct net_device *dev,
4901 u32 filter_mask))
4902 {
4903 struct nlmsghdr *nlh;
4904 struct ifinfomsg *ifm;
4905 struct nlattr *br_afspec;
4906 struct nlattr *protinfo;
4907 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4908 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4909 int err = 0;
4910
4911 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4912 if (nlh == NULL)
4913 return -EMSGSIZE;
4914
4915 ifm = nlmsg_data(nlh);
4916 ifm->ifi_family = AF_BRIDGE;
4917 ifm->__ifi_pad = 0;
4918 ifm->ifi_type = dev->type;
4919 ifm->ifi_index = dev->ifindex;
4920 ifm->ifi_flags = dev_get_flags(dev);
4921 ifm->ifi_change = 0;
4922
4923
4924 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4925 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4926 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4927 (br_dev &&
4928 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4929 (dev->addr_len &&
4930 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4931 (dev->ifindex != dev_get_iflink(dev) &&
4932 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4933 goto nla_put_failure;
4934
4935 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4936 if (!br_afspec)
4937 goto nla_put_failure;
4938
4939 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4940 nla_nest_cancel(skb, br_afspec);
4941 goto nla_put_failure;
4942 }
4943
4944 if (mode != BRIDGE_MODE_UNDEF) {
4945 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4946 nla_nest_cancel(skb, br_afspec);
4947 goto nla_put_failure;
4948 }
4949 }
4950 if (vlan_fill) {
4951 err = vlan_fill(skb, dev, filter_mask);
4952 if (err) {
4953 nla_nest_cancel(skb, br_afspec);
4954 goto nla_put_failure;
4955 }
4956 }
4957 nla_nest_end(skb, br_afspec);
4958
4959 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4960 if (!protinfo)
4961 goto nla_put_failure;
4962
4963 if (brport_nla_put_flag(skb, flags, mask,
4964 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4965 brport_nla_put_flag(skb, flags, mask,
4966 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4967 brport_nla_put_flag(skb, flags, mask,
4968 IFLA_BRPORT_FAST_LEAVE,
4969 BR_MULTICAST_FAST_LEAVE) ||
4970 brport_nla_put_flag(skb, flags, mask,
4971 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4972 brport_nla_put_flag(skb, flags, mask,
4973 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4974 brport_nla_put_flag(skb, flags, mask,
4975 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4976 brport_nla_put_flag(skb, flags, mask,
4977 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4978 brport_nla_put_flag(skb, flags, mask,
4979 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4980 brport_nla_put_flag(skb, flags, mask,
4981 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4982 brport_nla_put_flag(skb, flags, mask,
4983 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4984 nla_nest_cancel(skb, protinfo);
4985 goto nla_put_failure;
4986 }
4987
4988 nla_nest_end(skb, protinfo);
4989
4990 nlmsg_end(skb, nlh);
4991 return 0;
4992 nla_put_failure:
4993 nlmsg_cancel(skb, nlh);
4994 return err ? err : -EMSGSIZE;
4995 }
4996 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4997
4998 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4999 bool strict_check, u32 *filter_mask,
5000 struct netlink_ext_ack *extack)
5001 {
5002 struct nlattr *tb[IFLA_MAX+1];
5003 int err, i;
5004
5005 if (strict_check) {
5006 struct ifinfomsg *ifm;
5007
5008 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5009 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
5010 return -EINVAL;
5011 }
5012
5013 ifm = nlmsg_data(nlh);
5014 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5015 ifm->ifi_change || ifm->ifi_index) {
5016 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
5017 return -EINVAL;
5018 }
5019
5020 err = nlmsg_parse_deprecated_strict(nlh,
5021 sizeof(struct ifinfomsg),
5022 tb, IFLA_MAX, ifla_policy,
5023 extack);
5024 } else {
5025 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5026 tb, IFLA_MAX, ifla_policy,
5027 extack);
5028 }
5029 if (err < 0)
5030 return err;
5031
5032 /* new attributes should only be added with strict checking */
5033 for (i = 0; i <= IFLA_MAX; ++i) {
5034 if (!tb[i])
5035 continue;
5036
5037 switch (i) {
5038 case IFLA_EXT_MASK:
5039 *filter_mask = nla_get_u32(tb[i]);
5040 break;
5041 default:
5042 if (strict_check) {
5043 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5044 return -EINVAL;
5045 }
5046 }
5047 }
5048
5049 return 0;
5050 }
5051
5052 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5053 {
5054 const struct nlmsghdr *nlh = cb->nlh;
5055 struct net *net = sock_net(skb->sk);
5056 struct net_device *dev;
5057 int idx = 0;
5058 u32 portid = NETLINK_CB(cb->skb).portid;
5059 u32 seq = nlh->nlmsg_seq;
5060 u32 filter_mask = 0;
5061 int err;
5062
5063 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5064 cb->extack);
5065 if (err < 0 && cb->strict_check)
5066 return err;
5067
5068 rcu_read_lock();
5069 for_each_netdev_rcu(net, dev) {
5070 const struct net_device_ops *ops = dev->netdev_ops;
5071 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5072
5073 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
5074 if (idx >= cb->args[0]) {
5075 err = br_dev->netdev_ops->ndo_bridge_getlink(
5076 skb, portid, seq, dev,
5077 filter_mask, NLM_F_MULTI);
5078 if (err < 0 && err != -EOPNOTSUPP) {
5079 if (likely(skb->len))
5080 break;
5081
5082 goto out_err;
5083 }
5084 }
5085 idx++;
5086 }
5087
5088 if (ops->ndo_bridge_getlink) {
5089 if (idx >= cb->args[0]) {
5090 err = ops->ndo_bridge_getlink(skb, portid,
5091 seq, dev,
5092 filter_mask,
5093 NLM_F_MULTI);
5094 if (err < 0 && err != -EOPNOTSUPP) {
5095 if (likely(skb->len))
5096 break;
5097
5098 goto out_err;
5099 }
5100 }
5101 idx++;
5102 }
5103 }
5104 err = skb->len;
5105 out_err:
5106 rcu_read_unlock();
5107 cb->args[0] = idx;
5108
5109 return err;
5110 }
5111
5112 static inline size_t bridge_nlmsg_size(void)
5113 {
5114 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5115 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5116 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5117 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5118 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5119 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5120 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5121 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5122 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5123 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5124 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5125 }
5126
5127 static int rtnl_bridge_notify(struct net_device *dev)
5128 {
5129 struct net *net = dev_net(dev);
5130 struct sk_buff *skb;
5131 int err = -EOPNOTSUPP;
5132
5133 if (!dev->netdev_ops->ndo_bridge_getlink)
5134 return 0;
5135
5136 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5137 if (!skb) {
5138 err = -ENOMEM;
5139 goto errout;
5140 }
5141
5142 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5143 if (err < 0)
5144 goto errout;
5145
5146 /* Notification info is only filled for bridge ports, not the bridge
5147 * device itself. Therefore, a zero notification length is valid and
5148 * should not result in an error.
5149 */
5150 if (!skb->len)
5151 goto errout;
5152
5153 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5154 return 0;
5155 errout:
5156 WARN_ON(err == -EMSGSIZE);
5157 kfree_skb(skb);
5158 if (err)
5159 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5160 return err;
5161 }
5162
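/* RTM_SETLINK with AF_BRIDGE family: apply bridge port flags from the
 * IFLA_AF_SPEC nest, first via the bridge master (BRIDGE_FLAGS_MASTER),
 * then via the port device itself (BRIDGE_FLAGS_SELF). The flags attribute
 * is rewritten in place to report which parts were actually handled.
 */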
5163 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5164 struct netlink_ext_ack *extack)
5165 {
5166 struct net *net = sock_net(skb->sk);
5167 struct ifinfomsg *ifm;
5168 struct net_device *dev;
5169 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
5170 int rem, err = -EOPNOTSUPP;
5171 u16 flags = 0;
5172
5173 if (nlmsg_len(nlh) < sizeof(*ifm))
5174 return -EINVAL;
5175
5176 ifm = nlmsg_data(nlh);
5177 if (ifm->ifi_family != AF_BRIDGE)
5178 return -EPFNOSUPPORT;
5179
5180 dev = __dev_get_by_index(net, ifm->ifi_index);
5181 if (!dev) {
5182 NL_SET_ERR_MSG(extack, "unknown ifindex");
5183 return -ENODEV;
5184 }
5185
5186 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5187 if (br_spec) {
5188 nla_for_each_nested(attr, br_spec, rem) {
5189 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
5190 if (nla_len(attr) < sizeof(flags))
5191 return -EINVAL;
5192
5193 br_flags_attr = attr;
5194 flags = nla_get_u16(attr);
5195 }
5196
5197 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5198 if (nla_len(attr) < sizeof(u16))
5199 return -EINVAL;
5200 }
5201 }
5202 }
5203
5204 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5205 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5206
5207 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5208 err = -EOPNOTSUPP;
5209 goto out;
5210 }
5211
5212 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5213 extack);
5214 if (err)
5215 goto out;
5216
5217 flags &= ~BRIDGE_FLAGS_MASTER;
5218 }
5219
5220 if ((flags & BRIDGE_FLAGS_SELF)) {
5221 if (!dev->netdev_ops->ndo_bridge_setlink)
5222 err = -EOPNOTSUPP;
5223 else
5224 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5225 flags,
5226 extack);
5227 if (!err) {
5228 flags &= ~BRIDGE_FLAGS_SELF;
5229
5230 /* Generate event to notify upper layer of bridge
5231 * change
5232 */
5233 err = rtnl_bridge_notify(dev);
5234 }
5235 }
5236
5237 if (br_flags_attr)
5238 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
5239 out:
5240 return err;
5241 }
5242
5243 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5244 struct netlink_ext_ack *extack)
5245 {
5246 struct net *net = sock_net(skb->sk);
5247 struct ifinfomsg *ifm;
5248 struct net_device *dev;
5249 struct nlattr *br_spec, *attr = NULL;
5250 int rem, err = -EOPNOTSUPP;
5251 u16 flags = 0;
5252 bool have_flags = false;
5253
5254 if (nlmsg_len(nlh) < sizeof(*ifm))
5255 return -EINVAL;
5256
5257 ifm = nlmsg_data(nlh);
5258 if (ifm->ifi_family != AF_BRIDGE)
5259 return -EPFNOSUPPORT;
5260
5261 dev = __dev_get_by_index(net, ifm->ifi_index);
5262 if (!dev) {
5263 NL_SET_ERR_MSG(extack, "unknown ifindex");
5264 return -ENODEV;
5265 }
5266
5267 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5268 if (br_spec) {
5269 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
5270 rem) {
5271 if (nla_len(attr) < sizeof(flags))
5272 return -EINVAL;
5273
5274 have_flags = true;
5275 flags = nla_get_u16(attr);
5276 break;
5277 }
5278 }
5279
5280 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5281 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5282
5283 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5284 err = -EOPNOTSUPP;
5285 goto out;
5286 }
5287
5288 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5289 if (err)
5290 goto out;
5291
5292 flags &= ~BRIDGE_FLAGS_MASTER;
5293 }
5294
5295 if ((flags & BRIDGE_FLAGS_SELF)) {
5296 if (!dev->netdev_ops->ndo_bridge_dellink)
5297 err = -EOPNOTSUPP;
5298 else
5299 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5300 flags);
5301
5302 if (!err) {
5303 flags &= ~BRIDGE_FLAGS_SELF;
5304
5305 /* Generate event to notify upper layer of bridge
5306 * change
5307 */
5308 err = rtnl_bridge_notify(dev);
5309 }
5310 }
5311
5312 if (have_flags)
5313 memcpy(nla_data(attr), &flags, sizeof(flags));
5314 out:
5315 return err;
5316 }
5317
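/* A stats attribute is emitted when its bit is set in the filter mask and
 * either no resume point is pending (idxattr == 0) or the resume point is
 * exactly this attribute.
 */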
5318 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5319 {
5320 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5321 (!idxattr || idxattr == attrid);
5322 }
5323
5324 static bool
5325 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5326 {
5327 return dev->netdev_ops &&
5328 dev->netdev_ops->ndo_has_offload_stats &&
5329 dev->netdev_ops->ndo_get_offload_stats &&
5330 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5331 }
5332
5333 static unsigned int
5334 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5335 {
5336 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5337 sizeof(struct rtnl_link_stats64) : 0;
5338 }
5339
5340 static int
5341 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5342 struct sk_buff *skb)
5343 {
5344 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5345 struct nlattr *attr = NULL;
5346 void *attr_data;
5347 int err;
5348
5349 if (!size)
5350 return -ENODATA;
5351
5352 attr = nla_reserve_64bit(skb, attr_id, size,
5353 IFLA_OFFLOAD_XSTATS_UNSPEC);
5354 if (!attr)
5355 return -EMSGSIZE;
5356
5357 attr_data = nla_data(attr);
5358 memset(attr_data, 0, size);
5359
5360 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5361 if (err)
5362 return err;
5363
5364 return 0;
5365 }
5366
5367 static unsigned int
5368 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5369 enum netdev_offload_xstats_type type)
5370 {
5371 bool enabled = netdev_offload_xstats_enabled(dev, type);
5372
5373 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5374 }
5375
5376 struct rtnl_offload_xstats_request_used {
5377 bool request;
5378 bool used;
5379 };
5380
5381 static int
5382 rtnl_offload_xstats_get_stats(struct net_device *dev,
5383 enum netdev_offload_xstats_type type,
5384 struct rtnl_offload_xstats_request_used *ru,
5385 struct rtnl_hw_stats64 *stats,
5386 struct netlink_ext_ack *extack)
5387 {
5388 bool request;
5389 bool used;
5390 int err;
5391
5392 request = netdev_offload_xstats_enabled(dev, type);
5393 if (!request) {
5394 used = false;
5395 goto out;
5396 }
5397
5398 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5399 if (err)
5400 return err;
5401
5402 out:
5403 if (ru) {
5404 ru->request = request;
5405 ru->used = used;
5406 }
5407 return 0;
5408 }
5409
5410 static int
5411 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5412 struct rtnl_offload_xstats_request_used *ru)
5413 {
5414 struct nlattr *nest;
5415
5416 nest = nla_nest_start(skb, attr_id);
5417 if (!nest)
5418 return -EMSGSIZE;
5419
5420 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5421 goto nla_put_failure;
5422
5423 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5424 goto nla_put_failure;
5425
5426 nla_nest_end(skb, nest);
5427 return 0;
5428
5429 nla_put_failure:
5430 nla_nest_cancel(skb, nest);
5431 return -EMSGSIZE;
5432 }
5433
5434 static int
5435 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5436 struct netlink_ext_ack *extack)
5437 {
5438 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5439 struct rtnl_offload_xstats_request_used ru_l3;
5440 struct nlattr *nest;
5441 int err;
5442
5443 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5444 if (err)
5445 return err;
5446
5447 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5448 if (!nest)
5449 return -EMSGSIZE;
5450
5451 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5452 IFLA_OFFLOAD_XSTATS_L3_STATS,
5453 &ru_l3))
5454 goto nla_put_failure;
5455
5456 nla_nest_end(skb, nest);
5457 return 0;
5458
5459 nla_put_failure:
5460 nla_nest_cancel(skb, nest);
5461 return -EMSGSIZE;
5462 }
5463
5464 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5465 int *prividx, u32 off_filter_mask,
5466 struct netlink_ext_ack *extack)
5467 {
5468 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5469 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5470 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5471 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5472 bool have_data = false;
5473 int err;
5474
5475 if (*prividx <= attr_id_cpu_hit &&
5476 (off_filter_mask &
5477 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5478 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5479 if (!err) {
5480 have_data = true;
5481 } else if (err != -ENODATA) {
5482 *prividx = attr_id_cpu_hit;
5483 return err;
5484 }
5485 }
5486
5487 if (*prividx <= attr_id_hw_s_info &&
5488 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5489 *prividx = attr_id_hw_s_info;
5490
5491 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5492 if (err)
5493 return err;
5494
5495 have_data = true;
5496 *prividx = 0;
5497 }
5498
5499 if (*prividx <= attr_id_l3_stats &&
5500 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5501 unsigned int size_l3;
5502 struct nlattr *attr;
5503
5504 *prividx = attr_id_l3_stats;
5505
5506 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5507 if (!size_l3)
5508 goto skip_l3_stats;
5509 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5510 IFLA_OFFLOAD_XSTATS_UNSPEC);
5511 if (!attr)
5512 return -EMSGSIZE;
5513
5514 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5515 nla_data(attr), extack);
5516 if (err)
5517 return err;
5518
5519 have_data = true;
5520 skip_l3_stats:
5521 *prividx = 0;
5522 }
5523
5524 if (!have_data)
5525 return -ENODATA;
5526
5527 *prividx = 0;
5528 return 0;
5529 }
5530
5531 static unsigned int
5532 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5533 enum netdev_offload_xstats_type type)
5534 {
5535 return nla_total_size(0) +
5536 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5537 nla_total_size(sizeof(u8)) +
5538 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5539 nla_total_size(sizeof(u8)) +
5540 0;
5541 }
5542
5543 static unsigned int
5544 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5545 {
5546 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5547
5548 return nla_total_size(0) +
5549 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5550 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5551 0;
5552 }
5553
5554 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5555 u32 off_filter_mask)
5556 {
5557 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5558 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5559 int nla_size = 0;
5560 int size;
5561
5562 if (off_filter_mask &
5563 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5564 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5565 nla_size += nla_total_size_64bit(size);
5566 }
5567
5568 if (off_filter_mask &
5569 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5570 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5571
5572 if (off_filter_mask &
5573 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5574 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5575 nla_size += nla_total_size_64bit(size);
5576 }
5577
5578 if (nla_size != 0)
5579 nla_size += nla_total_size(0);
5580
5581 return nla_size;
5582 }
5583
5584 struct rtnl_stats_dump_filters {
5585 /* mask[0] filters outer attributes. Then individual nests have their
5586 * filtering mask at the index of the nested attribute.
5587 */
5588 u32 mask[IFLA_STATS_MAX + 1];
5589 };
5590
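/* Fill one RTM_NEWSTATS message. *idxattr and *prividx carry the attribute
 * and nested resume points between dump passes, so a message that
 * overflowed with NLM_F_MULTI can be continued instead of restarted.
 */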
5591 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5592 int type, u32 pid, u32 seq, u32 change,
5593 unsigned int flags,
5594 const struct rtnl_stats_dump_filters *filters,
5595 int *idxattr, int *prividx,
5596 struct netlink_ext_ack *extack)
5597 {
5598 unsigned int filter_mask = filters->mask[0];
5599 struct if_stats_msg *ifsm;
5600 struct nlmsghdr *nlh;
5601 struct nlattr *attr;
5602 int s_prividx = *prividx;
5603 int err;
5604
5605 ASSERT_RTNL();
5606
5607 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5608 if (!nlh)
5609 return -EMSGSIZE;
5610
5611 ifsm = nlmsg_data(nlh);
5612 ifsm->family = PF_UNSPEC;
5613 ifsm->pad1 = 0;
5614 ifsm->pad2 = 0;
5615 ifsm->ifindex = dev->ifindex;
5616 ifsm->filter_mask = filter_mask;
5617
5618 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5619 struct rtnl_link_stats64 *sp;
5620
5621 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5622 sizeof(struct rtnl_link_stats64),
5623 IFLA_STATS_UNSPEC);
5624 if (!attr) {
5625 err = -EMSGSIZE;
5626 goto nla_put_failure;
5627 }
5628
5629 sp = nla_data(attr);
5630 dev_get_stats(dev, sp);
5631 }
5632
5633 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5634 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5635
5636 if (ops && ops->fill_linkxstats) {
5637 *idxattr = IFLA_STATS_LINK_XSTATS;
5638 attr = nla_nest_start_noflag(skb,
5639 IFLA_STATS_LINK_XSTATS);
5640 if (!attr) {
5641 err = -EMSGSIZE;
5642 goto nla_put_failure;
5643 }
5644
5645 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5646 nla_nest_end(skb, attr);
5647 if (err)
5648 goto nla_put_failure;
5649 *idxattr = 0;
5650 }
5651 }
5652
5653 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5654 *idxattr)) {
5655 const struct rtnl_link_ops *ops = NULL;
5656 const struct net_device *master;
5657
5658 master = netdev_master_upper_dev_get(dev);
5659 if (master)
5660 ops = master->rtnl_link_ops;
5661 if (ops && ops->fill_linkxstats) {
5662 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5663 attr = nla_nest_start_noflag(skb,
5664 IFLA_STATS_LINK_XSTATS_SLAVE);
5665 if (!attr) {
5666 err = -EMSGSIZE;
5667 goto nla_put_failure;
5668 }
5669
5670 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5671 nla_nest_end(skb, attr);
5672 if (err)
5673 goto nla_put_failure;
5674 *idxattr = 0;
5675 }
5676 }
5677
5678 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5679 *idxattr)) {
5680 u32 off_filter_mask;
5681
5682 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5683 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5684 attr = nla_nest_start_noflag(skb,
5685 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5686 if (!attr) {
5687 err = -EMSGSIZE;
5688 goto nla_put_failure;
5689 }
5690
5691 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5692 off_filter_mask, extack);
5693 if (err == -ENODATA)
5694 nla_nest_cancel(skb, attr);
5695 else
5696 nla_nest_end(skb, attr);
5697
5698 if (err && err != -ENODATA)
5699 goto nla_put_failure;
5700 *idxattr = 0;
5701 }
5702
5703 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5704 struct rtnl_af_ops *af_ops;
5705
5706 *idxattr = IFLA_STATS_AF_SPEC;
5707 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5708 if (!attr) {
5709 err = -EMSGSIZE;
5710 goto nla_put_failure;
5711 }
5712
5713 rcu_read_lock();
5714 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5715 if (af_ops->fill_stats_af) {
5716 struct nlattr *af;
5717
5718 af = nla_nest_start_noflag(skb,
5719 af_ops->family);
5720 if (!af) {
5721 rcu_read_unlock();
5722 err = -EMSGSIZE;
5723 goto nla_put_failure;
5724 }
5725 err = af_ops->fill_stats_af(skb, dev);
5726
5727 if (err == -ENODATA) {
5728 nla_nest_cancel(skb, af);
5729 } else if (err < 0) {
5730 rcu_read_unlock();
5731 goto nla_put_failure;
5732 }
5733
5734 nla_nest_end(skb, af);
5735 }
5736 }
5737 rcu_read_unlock();
5738
5739 nla_nest_end(skb, attr);
5740
5741 *idxattr = 0;
5742 }
5743
5744 nlmsg_end(skb, nlh);
5745
5746 return 0;
5747
5748 nla_put_failure:
5749 	/* not a multipart message, or no progress made, means a real error */
5750 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5751 nlmsg_cancel(skb, nlh);
5752 else
5753 nlmsg_end(skb, nlh);
5754
5755 return err;
5756 }
5757
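/* Upper bound on the message rtnl_fill_statsinfo() can emit for @dev, used
 * to size the skb up front. It must not underestimate: the callers treat a
 * subsequent -EMSGSIZE as a bug.
 */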
5758 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5759 const struct rtnl_stats_dump_filters *filters)
5760 {
5761 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5762 unsigned int filter_mask = filters->mask[0];
5763
5764 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5765 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5766
5767 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5768 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5769 int attr = IFLA_STATS_LINK_XSTATS;
5770
5771 if (ops && ops->get_linkxstats_size) {
5772 size += nla_total_size(ops->get_linkxstats_size(dev,
5773 attr));
5774 /* for IFLA_STATS_LINK_XSTATS */
5775 size += nla_total_size(0);
5776 }
5777 }
5778
5779 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5780 struct net_device *_dev = (struct net_device *)dev;
5781 const struct rtnl_link_ops *ops = NULL;
5782 const struct net_device *master;
5783
5784 /* netdev_master_upper_dev_get can't take const */
5785 master = netdev_master_upper_dev_get(_dev);
5786 if (master)
5787 ops = master->rtnl_link_ops;
5788 if (ops && ops->get_linkxstats_size) {
5789 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5790
5791 size += nla_total_size(ops->get_linkxstats_size(dev,
5792 attr));
5793 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5794 size += nla_total_size(0);
5795 }
5796 }
5797
5798 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5799 u32 off_filter_mask;
5800
5801 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5802 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5803 }
5804
5805 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5806 struct rtnl_af_ops *af_ops;
5807
5808 /* for IFLA_STATS_AF_SPEC */
5809 size += nla_total_size(0);
5810
5811 rcu_read_lock();
5812 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5813 if (af_ops->get_stats_af_size) {
5814 size += nla_total_size(
5815 af_ops->get_stats_af_size(dev));
5816
5817 /* for AF_* */
5818 size += nla_total_size(0);
5819 }
5820 }
5821 rcu_read_unlock();
5822 }
5823
5824 return size;
5825 }
5826
5827 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5828
5829 static const struct nla_policy
5830 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5831 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5832 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5833 };
5834
5835 static const struct nla_policy
5836 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5837 [IFLA_STATS_GET_FILTERS] =
5838 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5839 };
5840
5841 static const struct nla_policy
5842 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5843 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5844 };
5845
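/* Parse the IFLA_STATS_GET_FILTERS nest. Nested attribute N carries a u32
 * sub-mask refining what is dumped inside top-level attribute N, and is
 * only accepted if bit N is also set in the legacy filter_mask header
 * field. An illustrative request (layout sketch, not verbatim wire format):
 *
 *   struct if_stats_msg  .filter_mask =
 *           IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS)
 *   IFLA_STATS_GET_FILTERS (nest)
 *     IFLA_STATS_LINK_OFFLOAD_XSTATS (u32) =
 *           IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)
 */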
5846 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5847 struct rtnl_stats_dump_filters *filters,
5848 struct netlink_ext_ack *extack)
5849 {
5850 struct nlattr *tb[IFLA_STATS_MAX + 1];
5851 int err;
5852 int at;
5853
5854 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5855 rtnl_stats_get_policy_filters, extack);
5856 if (err < 0)
5857 return err;
5858
5859 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5860 if (tb[at]) {
5861 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5862 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5863 return -EINVAL;
5864 }
5865 filters->mask[at] = nla_get_u32(tb[at]);
5866 }
5867 }
5868
5869 return 0;
5870 }
5871
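/* Seed the filters from the legacy header mask, defaulting every sub-mask
 * to all-ones, then overlay whatever IFLA_STATS_GET_FILTERS narrows down.
 */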
5872 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5873 u32 filter_mask,
5874 struct rtnl_stats_dump_filters *filters,
5875 struct netlink_ext_ack *extack)
5876 {
5877 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5878 int err;
5879 int i;
5880
5881 filters->mask[0] = filter_mask;
5882 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5883 filters->mask[i] = -1U;
5884
5885 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5886 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5887 if (err < 0)
5888 return err;
5889
5890 if (tb[IFLA_STATS_GET_FILTERS]) {
5891 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5892 filters, extack);
5893 if (err)
5894 return err;
5895 }
5896
5897 return 0;
5898 }
5899
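/* Sanity-check an RTM_GETSTATS header. Only strict requests are fully
 * validated: the padding must be zero and a dump may not pass an ifindex,
 * filter_mask being the one legacy field allowed to shape the result.
 */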
5900 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5901 bool is_dump, struct netlink_ext_ack *extack)
5902 {
5903 struct if_stats_msg *ifsm;
5904
5905 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5906 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5907 return -EINVAL;
5908 }
5909
5910 if (!strict_check)
5911 return 0;
5912
5913 ifsm = nlmsg_data(nlh);
5914
5915 /* only requests using strict checks can pass data to influence
5916 * the dump. The legacy exception is filter_mask.
5917 */
5918 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5919 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5920 return -EINVAL;
5921 }
5922 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5923 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5924 return -EINVAL;
5925 }
5926
5927 return 0;
5928 }
5929
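/* RTM_GETSTATS doit handler: look up the single device named by ifindex
 * and unicast one freshly sized RTM_NEWSTATS message back to the caller.
 */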
5930 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5931 struct netlink_ext_ack *extack)
5932 {
5933 struct rtnl_stats_dump_filters filters;
5934 struct net *net = sock_net(skb->sk);
5935 struct net_device *dev = NULL;
5936 int idxattr = 0, prividx = 0;
5937 struct if_stats_msg *ifsm;
5938 struct sk_buff *nskb;
5939 int err;
5940
5941 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5942 false, extack);
5943 if (err)
5944 return err;
5945
5946 ifsm = nlmsg_data(nlh);
5947 if (ifsm->ifindex > 0)
5948 dev = __dev_get_by_index(net, ifsm->ifindex);
5949 else
5950 return -EINVAL;
5951
5952 if (!dev)
5953 return -ENODEV;
5954
5955 if (!ifsm->filter_mask) {
5956 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5957 return -EINVAL;
5958 }
5959
5960 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5961 if (err)
5962 return err;
5963
5964 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5965 if (!nskb)
5966 return -ENOBUFS;
5967
5968 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5969 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5970 0, &filters, &idxattr, &prividx, extack);
5971 if (err < 0) {
5972 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5973 WARN_ON(err == -EMSGSIZE);
5974 kfree_skb(nskb);
5975 } else {
5976 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5977 }
5978
5979 return err;
5980 }
5981
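/* RTM_GETSTATS dumpit handler. The walk state lives in cb->ctx across
 * recvmsg() calls: ifindex is the device position, idxattr/prividx the
 * intra-device resume point after a message filled up.
 */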
5982 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5983 {
5984 struct netlink_ext_ack *extack = cb->extack;
5985 struct rtnl_stats_dump_filters filters;
5986 struct net *net = sock_net(skb->sk);
5987 unsigned int flags = NLM_F_MULTI;
5988 struct if_stats_msg *ifsm;
5989 struct {
5990 unsigned long ifindex;
5991 int idxattr;
5992 int prividx;
5993 } *ctx = (void *)cb->ctx;
5994 struct net_device *dev;
5995 int err;
5996
5997 cb->seq = net->dev_base_seq;
5998
5999 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
6000 if (err)
6001 return err;
6002
6003 ifsm = nlmsg_data(cb->nlh);
6004 if (!ifsm->filter_mask) {
6005 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
6006 return -EINVAL;
6007 }
6008
6009 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
6010 extack);
6011 if (err)
6012 return err;
6013
6014 for_each_netdev_dump(net, dev, ctx->ifindex) {
6015 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6016 NETLINK_CB(cb->skb).portid,
6017 cb->nlh->nlmsg_seq, 0,
6018 flags, &filters,
6019 &ctx->idxattr, &ctx->prividx,
6020 extack);
6021 /* If we ran out of room on the first message,
6022 * we're in trouble.
6023 */
6024 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
6025
6026 if (err < 0)
6027 break;
6028 ctx->prividx = 0;
6029 ctx->idxattr = 0;
6030 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
6031 }
6032
6033 return err;
6034 }
6035
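/* Send an unsolicited RTM_NEWSTATS carrying the hardware stats usage state
 * (IFLA_OFFLOAD_XSTATS_HW_S_INFO) to RTNLGRP_STATS listeners, e.g. after
 * offloaded L3 stats were enabled or disabled on @dev.
 */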
6036 void rtnl_offload_xstats_notify(struct net_device *dev)
6037 {
6038 struct rtnl_stats_dump_filters response_filters = {};
6039 struct net *net = dev_net(dev);
6040 int idxattr = 0, prividx = 0;
6041 struct sk_buff *skb;
6042 int err = -ENOBUFS;
6043
6044 ASSERT_RTNL();
6045
6046 response_filters.mask[0] |=
6047 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6048 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6049 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6050
6051 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6052 GFP_KERNEL);
6053 if (!skb)
6054 goto errout;
6055
6056 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6057 &response_filters, &idxattr, &prividx, NULL);
6058 if (err < 0) {
6059 kfree_skb(skb);
6060 goto errout;
6061 }
6062
6063 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6064 return;
6065
6066 errout:
6067 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6068 }
6069 EXPORT_SYMBOL(rtnl_offload_xstats_notify);
6070
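/* RTM_SETSTATS handler. The only settable knob so far is
 * IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, a boolean u8 toggling offloaded
 * L3 stats collection; -EALREADY from the toggle is not treated as an
 * error, it merely suppresses the notification.
 */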
6071 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6072 struct netlink_ext_ack *extack)
6073 {
6074 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
6075 struct rtnl_stats_dump_filters response_filters = {};
6076 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6077 struct net *net = sock_net(skb->sk);
6078 struct net_device *dev = NULL;
6079 struct if_stats_msg *ifsm;
6080 bool notify = false;
6081 int err;
6082
6083 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6084 false, extack);
6085 if (err)
6086 return err;
6087
6088 ifsm = nlmsg_data(nlh);
6089 if (ifsm->family != AF_UNSPEC) {
6090 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6091 return -EINVAL;
6092 }
6093
6094 if (ifsm->ifindex > 0)
6095 dev = __dev_get_by_index(net, ifsm->ifindex);
6096 else
6097 return -EINVAL;
6098
6099 if (!dev)
6100 return -ENODEV;
6101
6102 if (ifsm->filter_mask) {
6103 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6104 return -EINVAL;
6105 }
6106
6107 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6108 ifla_stats_set_policy, extack);
6109 if (err < 0)
6110 return err;
6111
6112 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6113 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6114
6115 if (req)
6116 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6117 else
6118 err = netdev_offload_xstats_disable(dev, t_l3);
6119
6120 if (!err)
6121 notify = true;
6122 else if (err != -EALREADY)
6123 return err;
6124
6125 response_filters.mask[0] |=
6126 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6127 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6128 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6129 }
6130
6131 if (notify)
6132 rtnl_offload_xstats_notify(dev);
6133
6134 return 0;
6135 }
6136
6137 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6138 struct netlink_ext_ack *extack)
6139 {
6140 struct br_port_msg *bpm;
6141
6142 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6143 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6144 return -EINVAL;
6145 }
6146
6147 bpm = nlmsg_data(nlh);
6148 if (bpm->ifindex) {
6149 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6150 return -EINVAL;
6151 }
6152 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6153 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6154 return -EINVAL;
6155 }
6156
6157 return 0;
6158 }
6159
6160 struct rtnl_mdb_dump_ctx {
6161 long idx;
6162 };
6163
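/* Walk all devices that implement ndo_mdb_dump. Markers and the change
 * sequence are maintained per device, so both are reset whenever the walk
 * advances to the next one.
 */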
6164 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
6165 {
6166 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
6167 struct net *net = sock_net(skb->sk);
6168 struct net_device *dev;
6169 int idx, s_idx;
6170 int err;
6171
6172 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);
6173
6174 if (cb->strict_check) {
6175 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
6176 if (err)
6177 return err;
6178 }
6179
6180 s_idx = ctx->idx;
6181 idx = 0;
6182
6183 for_each_netdev(net, dev) {
6184 if (idx < s_idx)
6185 goto skip;
6186 if (!dev->netdev_ops->ndo_mdb_dump)
6187 goto skip;
6188
6189 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6190 if (err == -EMSGSIZE)
6191 goto out;
6192 		/* Moving on to the next device, reset the markers and sequence
6193 		 * counters since they are all maintained per-device.
6194 */
6195 memset(cb->ctx, 0, sizeof(cb->ctx));
6196 cb->prev_seq = 0;
6197 cb->seq = 0;
6198 skip:
6199 idx++;
6200 }
6201
6202 out:
6203 ctx->idx = idx;
6204 return skb->len;
6205 }
6206
6207 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
6208 struct netlink_ext_ack *extack)
6209 {
6210 struct br_mdb_entry *entry = nla_data(attr);
6211
6212 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6213 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6214 return -EINVAL;
6215 }
6216
6217 if (entry->ifindex) {
6218 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
6219 return -EINVAL;
6220 }
6221
6222 if (entry->state) {
6223 NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
6224 return -EINVAL;
6225 }
6226
6227 if (entry->flags) {
6228 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
6229 return -EINVAL;
6230 }
6231
6232 if (entry->vid >= VLAN_VID_MASK) {
6233 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6234 return -EINVAL;
6235 }
6236
6237 if (entry->addr.proto != htons(ETH_P_IP) &&
6238 entry->addr.proto != htons(ETH_P_IPV6) &&
6239 entry->addr.proto != 0) {
6240 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6241 return -EINVAL;
6242 }
6243
6244 return 0;
6245 }
6246
6247 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
6248 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6249 rtnl_validate_mdb_entry_get,
6250 sizeof(struct br_mdb_entry)),
6251 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6252 };
6253
6254 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6255 struct netlink_ext_ack *extack)
6256 {
6257 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
6258 struct net *net = sock_net(in_skb->sk);
6259 struct br_port_msg *bpm;
6260 struct net_device *dev;
6261 int err;
6262
6263 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
6264 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
6265 if (err)
6266 return err;
6267
6268 bpm = nlmsg_data(nlh);
6269 if (!bpm->ifindex) {
6270 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6271 return -EINVAL;
6272 }
6273
6274 dev = __dev_get_by_index(net, bpm->ifindex);
6275 if (!dev) {
6276 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6277 return -ENODEV;
6278 }
6279
6280 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
6281 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
6282 return -EINVAL;
6283 }
6284
6285 if (!dev->netdev_ops->ndo_mdb_get) {
6286 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6287 return -EOPNOTSUPP;
6288 }
6289
6290 return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
6291 nlh->nlmsg_seq, extack);
6292 }
6293
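/* Validate a struct br_mdb_entry for RTM_NEWMDB/RTM_DELMDB: IPv4 groups
 * must be multicast (or the 0.0.0.0 catch-all) and not link-local, IPv6
 * groups must not be the link-local all-nodes group, and a protocol of 0
 * denotes an L2 entry with a multicast MAC address.
 */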
6294 static int rtnl_validate_mdb_entry(const struct nlattr *attr,
6295 struct netlink_ext_ack *extack)
6296 {
6297 struct br_mdb_entry *entry = nla_data(attr);
6298
6299 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6300 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6301 return -EINVAL;
6302 }
6303
6304 if (entry->ifindex == 0) {
6305 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
6306 return -EINVAL;
6307 }
6308
6309 if (entry->addr.proto == htons(ETH_P_IP)) {
6310 if (!ipv4_is_multicast(entry->addr.u.ip4) &&
6311 !ipv4_is_zeronet(entry->addr.u.ip4)) {
6312 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
6313 return -EINVAL;
6314 }
6315 if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
6316 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
6317 return -EINVAL;
6318 }
6319 #if IS_ENABLED(CONFIG_IPV6)
6320 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
6321 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
6322 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
6323 return -EINVAL;
6324 }
6325 #endif
6326 } else if (entry->addr.proto == 0) {
6327 /* L2 mdb */
6328 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
6329 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
6330 return -EINVAL;
6331 }
6332 } else {
6333 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6334 return -EINVAL;
6335 }
6336
6337 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6338 NL_SET_ERR_MSG(extack, "Unknown entry state");
6339 return -EINVAL;
6340 }
6341 if (entry->vid >= VLAN_VID_MASK) {
6342 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6343 return -EINVAL;
6344 }
6345
6346 return 0;
6347 }
6348
6349 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
6350 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
6351 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6352 rtnl_validate_mdb_entry,
6353 sizeof(struct br_mdb_entry)),
6354 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6355 };
6356
6357 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6358 struct netlink_ext_ack *extack)
6359 {
6360 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6361 struct net *net = sock_net(skb->sk);
6362 struct br_port_msg *bpm;
6363 struct net_device *dev;
6364 int err;
6365
6366 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6367 MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6368 if (err)
6369 return err;
6370
6371 bpm = nlmsg_data(nlh);
6372 if (!bpm->ifindex) {
6373 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6374 return -EINVAL;
6375 }
6376
6377 dev = __dev_get_by_index(net, bpm->ifindex);
6378 if (!dev) {
6379 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6380 return -ENODEV;
6381 }
6382
6383 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6384 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6385 return -EINVAL;
6386 }
6387
6388 if (!dev->netdev_ops->ndo_mdb_add) {
6389 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6390 return -EOPNOTSUPP;
6391 }
6392
6393 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6394 }
6395
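/* With NLM_F_BULK a single delete may match many MDB entries, so the entry
 * acts as a filter: state and VID may be set, while the group address and
 * flags must be left zeroed.
 */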
6396 static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
6397 struct netlink_ext_ack *extack)
6398 {
6399 struct br_mdb_entry *entry = nla_data(attr);
6400 struct br_mdb_entry zero_entry = {};
6401
6402 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6403 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6404 return -EINVAL;
6405 }
6406
6407 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6408 NL_SET_ERR_MSG(extack, "Unknown entry state");
6409 return -EINVAL;
6410 }
6411
6412 if (entry->flags) {
6413 NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
6414 return -EINVAL;
6415 }
6416
6417 if (entry->vid >= VLAN_N_VID - 1) {
6418 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6419 return -EINVAL;
6420 }
6421
6422 if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
6423 NL_SET_ERR_MSG(extack, "Entry address cannot be set");
6424 return -EINVAL;
6425 }
6426
6427 return 0;
6428 }
6429
6430 static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
6431 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6432 rtnl_validate_mdb_entry_del_bulk,
6433 sizeof(struct br_mdb_entry)),
6434 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6435 };
6436
6437 static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
6438 struct netlink_ext_ack *extack)
6439 {
6440 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
6441 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6442 struct net *net = sock_net(skb->sk);
6443 struct br_port_msg *bpm;
6444 struct net_device *dev;
6445 int err;
6446
6447 if (!del_bulk)
6448 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6449 MDBA_SET_ENTRY_MAX, mdba_policy,
6450 extack);
6451 else
6452 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
6453 mdba_del_bulk_policy, extack);
6454 if (err)
6455 return err;
6456
6457 bpm = nlmsg_data(nlh);
6458 if (!bpm->ifindex) {
6459 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6460 return -EINVAL;
6461 }
6462
6463 dev = __dev_get_by_index(net, bpm->ifindex);
6464 if (!dev) {
6465 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6466 return -ENODEV;
6467 }
6468
6469 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6470 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6471 return -EINVAL;
6472 }
6473
6474 if (del_bulk) {
6475 if (!dev->netdev_ops->ndo_mdb_del_bulk) {
6476 NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
6477 return -EOPNOTSUPP;
6478 }
6479 return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
6480 }
6481
6482 if (!dev->netdev_ops->ndo_mdb_del) {
6483 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6484 return -EOPNOTSUPP;
6485 }
6486
6487 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6488 }
6489
6490 /* Run one registered dump callback, taking the RTNL unless it is flagged unlocked. */
6491
6492 static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
6493 {
6494 const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
6495 rtnl_dumpit_func dumpit = cb->data;
6496 int err;
6497
6498 	/* The previous iteration has already finished; avoid calling ->dumpit()
6499 	 * again, as it may not expect to be invoked after reporting the end.
6500 */
6501 if (!dumpit)
6502 return 0;
6503
6504 if (needs_lock)
6505 rtnl_lock();
6506 err = dumpit(skb, cb);
6507 if (needs_lock)
6508 rtnl_unlock();
6509
6510 	/* Old dump handlers used to send NLM_DONE in a separate recvmsg()
6511 	 * response. Some applications that parse netlink manually depend on this.
6512 */
6513 if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
6514 if (err < 0 && err != -EMSGSIZE)
6515 return err;
6516 if (!err)
6517 cb->data = NULL;
6518
6519 return skb->len;
6520 }
6521 return err;
6522 }
6523
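/* Start a netlink dump. Handlers that need the RTNL held or a separate
 * NLM_DONE are interposed with rtnl_dumpit(), the real callback being
 * stashed in control->data.
 */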
6524 static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
6525 const struct nlmsghdr *nlh,
6526 struct netlink_dump_control *control)
6527 {
6528 if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
6529 !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
6530 WARN_ON(control->data);
6531 control->data = control->dump;
6532 control->dump = rtnl_dumpit;
6533 }
6534
6535 return netlink_dump_start(ssk, skb, nlh, control);
6536 }
6537
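/* Process one rtnetlink message: resolve the (family, msgtype) handler
 * registered via rtnl_register() and dispatch either a dump or a doit,
 * taking the RTNL unless the handler is flagged RTNL_FLAG_DOIT_UNLOCKED.
 */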
6538 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6539 struct netlink_ext_ack *extack)
6540 {
6541 struct net *net = sock_net(skb->sk);
6542 struct rtnl_link *link;
6543 enum rtnl_kinds kind;
6544 struct module *owner;
6545 int err = -EOPNOTSUPP;
6546 rtnl_doit_func doit;
6547 unsigned int flags;
6548 int family;
6549 int type;
6550
6551 type = nlh->nlmsg_type;
6552 if (type > RTM_MAX)
6553 return -EOPNOTSUPP;
6554
6555 type -= RTM_BASE;
6556
6557 	/* All messages must carry at least a struct rtgenmsg (one byte) */
6558 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6559 return 0;
6560
6561 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6562 kind = rtnl_msgtype_kind(type);
6563
6564 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6565 return -EPERM;
6566
6567 rcu_read_lock();
6568 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6569 struct sock *rtnl;
6570 rtnl_dumpit_func dumpit;
6571 u32 min_dump_alloc = 0;
6572
6573 link = rtnl_get_link(family, type);
6574 if (!link || !link->dumpit) {
6575 family = PF_UNSPEC;
6576 link = rtnl_get_link(family, type);
6577 if (!link || !link->dumpit)
6578 goto err_unlock;
6579 }
6580 owner = link->owner;
6581 dumpit = link->dumpit;
6582 flags = link->flags;
6583
6584 if (type == RTM_GETLINK - RTM_BASE)
6585 min_dump_alloc = rtnl_calcit(skb, nlh);
6586
6587 err = 0;
6588 /* need to do this before rcu_read_unlock() */
6589 if (!try_module_get(owner))
6590 err = -EPROTONOSUPPORT;
6591
6592 rcu_read_unlock();
6593
6594 rtnl = net->rtnl;
6595 if (err == 0) {
6596 struct netlink_dump_control c = {
6597 .dump = dumpit,
6598 .min_dump_alloc = min_dump_alloc,
6599 .module = owner,
6600 .flags = flags,
6601 };
6602 err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
6603 			/* netlink_dump_start() will keep a reference on
6604 			 * the module if the dump is still in progress.
6605 */
6606 module_put(owner);
6607 }
6608 return err;
6609 }
6610
6611 link = rtnl_get_link(family, type);
6612 if (!link || !link->doit) {
6613 family = PF_UNSPEC;
6614 link = rtnl_get_link(PF_UNSPEC, type);
6615 if (!link || !link->doit)
6616 goto out_unlock;
6617 }
6618
6619 owner = link->owner;
6620 if (!try_module_get(owner)) {
6621 err = -EPROTONOSUPPORT;
6622 goto out_unlock;
6623 }
6624
6625 flags = link->flags;
6626 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6627 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6628 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6629 module_put(owner);
6630 goto err_unlock;
6631 }
6632
6633 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6634 doit = link->doit;
6635 rcu_read_unlock();
6636 if (doit)
6637 err = doit(skb, nlh, extack);
6638 module_put(owner);
6639 return err;
6640 }
6641 rcu_read_unlock();
6642
6643 rtnl_lock();
6644 link = rtnl_get_link(family, type);
6645 if (link && link->doit)
6646 err = link->doit(skb, nlh, extack);
6647 rtnl_unlock();
6648
6649 module_put(owner);
6650
6651 return err;
6652
6653 out_unlock:
6654 rcu_read_unlock();
6655 return err;
6656
6657 err_unlock:
6658 rcu_read_unlock();
6659 return -EOPNOTSUPP;
6660 }
6661
6662 static void rtnetlink_rcv(struct sk_buff *skb)
6663 {
6664 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6665 }
6666
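/* The multicast routing cache-report groups carry routing state and packet
 * data, so joining them requires CAP_NET_ADMIN in the user namespace that
 * owns the netns.
 */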
6667 static int rtnetlink_bind(struct net *net, int group)
6668 {
6669 switch (group) {
6670 case RTNLGRP_IPV4_MROUTE_R:
6671 case RTNLGRP_IPV6_MROUTE_R:
6672 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6673 return -EPERM;
6674 break;
6675 }
6676 return 0;
6677 }
6678
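/* Forward interesting netdevice notifier events to userspace as
 * RTM_NEWLINK notifications; events not listed here either have dedicated
 * notification paths or are not announced.
 */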
6679 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6680 {
6681 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6682
6683 switch (event) {
6684 case NETDEV_REBOOT:
6685 case NETDEV_CHANGEMTU:
6686 case NETDEV_CHANGEADDR:
6687 case NETDEV_CHANGENAME:
6688 case NETDEV_FEAT_CHANGE:
6689 case NETDEV_BONDING_FAILOVER:
6690 case NETDEV_POST_TYPE_CHANGE:
6691 case NETDEV_NOTIFY_PEERS:
6692 case NETDEV_CHANGEUPPER:
6693 case NETDEV_RESEND_IGMP:
6694 case NETDEV_CHANGEINFODATA:
6695 case NETDEV_CHANGELOWERSTATE:
6696 case NETDEV_CHANGE_TX_QUEUE_LEN:
6697 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6698 GFP_KERNEL, NULL, 0, 0, NULL);
6699 break;
6700 default:
6701 break;
6702 }
6703 return NOTIFY_DONE;
6704 }
6705
6706 static struct notifier_block rtnetlink_dev_notifier = {
6707 .notifier_call = rtnetlink_event,
6708 };
6709
6710
6711 static int __net_init rtnetlink_net_init(struct net *net)
6712 {
6713 struct sock *sk;
6714 struct netlink_kernel_cfg cfg = {
6715 .groups = RTNLGRP_MAX,
6716 .input = rtnetlink_rcv,
6717 .flags = NL_CFG_F_NONROOT_RECV,
6718 .bind = rtnetlink_bind,
6719 };
6720
6721 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
6722 if (!sk)
6723 return -ENOMEM;
6724 net->rtnl = sk;
6725 return 0;
6726 }
6727
6728 static void __net_exit rtnetlink_net_exit(struct net *net)
6729 {
6730 netlink_kernel_release(net->rtnl);
6731 net->rtnl = NULL;
6732 }
6733
6734 static struct pernet_operations rtnetlink_net_ops = {
6735 .init = rtnetlink_net_init,
6736 .exit = rtnetlink_net_exit,
6737 };
6738
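/* Wire up rtnetlink: create the per-namespace NETLINK_ROUTE socket and
 * register the core PF_UNSPEC and PF_BRIDGE handlers. Protocol-specific
 * handlers register themselves later through rtnl_register().
 */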
6739 void __init rtnetlink_init(void)
6740 {
6741 if (register_pernet_subsys(&rtnetlink_net_ops))
6742 panic("rtnetlink_init: cannot initialize rtnetlink\n");
6743
6744 register_netdevice_notifier(&rtnetlink_dev_notifier);
6745
6746 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6747 rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
6748 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6749 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6750 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6751
6752 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6753 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6754 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6755
6756 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6757 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6758
6759 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6760 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6761 RTNL_FLAG_BULK_DEL_SUPPORTED);
6762 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6763
6764 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6765 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6766 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6767
6768 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6769 0);
6770 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
6771
6772 rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
6773 rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
6774 rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
6775 RTNL_FLAG_BULK_DEL_SUPPORTED);
6776 }
6777