// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);
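
/* Example: the usual pattern for serialising a configuration change with
 * the rest of the RTNL users. A minimal sketch; foo_change_mtu() is a
 * hypothetical caller, not part of this file.
 *
 *	static int foo_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		int err;
 *
 *		err = rtnl_lock_killable();
 *		if (err)
 *			return err;
 *		err = dev_set_mtu(dev, new_mtu);
 *		rtnl_unlock();
 *		return err;
 *	}
 */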

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 * // list not empty now
	 * // because of thread 2
	 *				  rtnl_lock()
	 *				  while (!list_empty(...))
	 *				    rtnl_lock()
	 *				    wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
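
/* Example: batching skb frees under RTNL. The skbs are only queued here;
 * __rtnl_unlock() above releases them once the mutex has been dropped. A
 * sketch with hypothetical list endpoints first_skb/last_skb.
 *
 *	rtnl_lock();
 *	...
 *	rtnl_kfree_skbs(first_skb, last_skb);
 *	...
 *	rtnl_unlock();
 */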

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
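
/* Example: a module wiring up handlers for one message type and tearing
 * them down on exit. A minimal sketch; foo_doit() and foo_dumpit() are
 * hypothetical, and the family/msgtype pair is illustrative only.
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETLINK,
 *					    foo_doit, foo_dumpit, 0);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_unregister(PF_UNSPEC, RTM_GETLINK);
 *	}
 */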

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have those filled in, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill in dellink either; that disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
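
/* Example: a virtual-device driver registering its link type so that
 * "ip link add ... type foo" can create instances. A minimal sketch;
 * the "foo" kind and foo_setup() are hypothetical.
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_link_register(&foo_link_ops);
 *	}
 */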

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
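
/* Example: an address family publishing its IFLA_AF_SPEC handlers. A
 * minimal sketch; the foo_* callbacks are hypothetical.
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */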

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
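
/* Example: attribute layout produced by rtnl_link_fill() for a device that
 * is itself a "veth" and is enslaved to a bridge; illustrative only.
 *
 *	IFLA_LINKINFO
 *	  IFLA_INFO_KIND	"veth"
 *	  IFLA_INFO_DATA	(veth's fill_info output)
 *	  IFLA_INFO_SLAVE_KIND	"bridge"
 *	  IFLA_INFO_SLAVE_DATA	(bridge's fill_slave_info output)
 */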

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
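
/* Example: the common notify-or-set-error pattern built on the helpers
 * above; a sketch of what notification paths do, with the fill step and
 * payload size elided.
 *
 *	skb = nlmsg_new(payload_size, GFP_KERNEL);
 *	if (!skb)
 *		goto errout;
 *	...fill the message...
 *	rtnl_notify(skb, net, 0, RTNLGRP_LINK, nlh, GFP_KERNEL);
 *	return;
 *errout:
 *	rtnl_set_sk_err(net, RTNLGRP_LINK, -ENOBUFS);
 */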

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
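
/* Example: a route dumper attaching RTA_CACHEINFO; a sketch of the pattern
 * used by FIB dump code, with placeholder variables.
 *
 *	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
 *		goto nla_put_failure;
 */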

void netdev_set_operstate(struct net_device *dev, int newstate)
{
	unsigned int old = READ_ONCE(dev->operstate);

	do {
		if (old == newstate)
			return;
	} while (!try_cmpxchg(&dev->operstate, &old, newstate));

	netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = READ_ONCE(dev->operstate);

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	netdev_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
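
/* Example: a SETLINK request that only toggles IFF_UP leaves every other
 * flag alone, while a legacy request with ifi_change == 0 applies
 * ifi_flags verbatim.
 *
 *	ifm->ifi_flags  = IFF_UP;
 *	ifm->ifi_change = IFF_UP;
 *	// -> flags = IFF_UP | (rtnl_dev_get_flags(dev) & ~IFF_UP)
 */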

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	unsigned int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
		cnt++;
	rcu_read_unlock();

	if (!cnt)
		return 0;

	return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	/* Assume dev->proto_down_reason is not zero. */
	size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */

	size += dpll_netdev_pin_handle_size(dev);

	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4) /* IFLA_EVENT */
	       + nla_total_size(4) /* IFLA_NEW_NETNSID */
	       + nla_total_size(4) /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev) /* proto down */
	       + nla_total_size(4) /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4) /* IFLA_MIN_MTU */
	       + nla_total_size(4) /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + rtnl_dpll_pin_size(dev)
	       + nla_total_size(8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
	       + 0;
}
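
/* Example: the estimate above sizes the skb for a link message, so a
 * well-behaved fill must never exceed it. A sketch of the allocation step
 * used by the ifinfo notification path.
 *
 *	skb = nlmsg_new(if_nlmsg_size(dev, 0), GFP_KERNEL);
 *	if (!skb)
 *		return NULL;
 */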

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       u32 ext_filter_mask)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		return -EMSGSIZE;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
		memset(&vf_stats, 0, sizeof(vf_stats));
		if (dev->netdev_ops->ndo_get_vf_stats)
			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
							  &vf_stats);
		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
		if (!vfstats)
			goto nla_put_vf_failure;
		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
			nla_nest_cancel(skb, vfstats);
			goto nla_put_vf_failure;
		}
		nla_nest_end(skb, vfstats);
	}
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
			nla_nest_cancel(skb, vfinfo);
			return -EMSGSIZE;
		}
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = READ_ONCE(dev->mem_start);
	map.mem_end = READ_ONCE(dev->mem_end);
	map.base_addr = READ_ONCE(dev->base_addr);
	map.irq = READ_ONCE(dev->irq);
	map.dma = READ_ONCE(dev->dma);
	map.port = READ_ONCE(dev->if_port);

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;
	u32 res = 0;

	rcu_read_lock();
	generic_xdp_prog = rcu_dereference(dev->xdp_prog);
	if (generic_xdp_prog)
		res = generic_xdp_prog->aux->id;
	rcu_read_unlock();

	return res;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}
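
/* Example: IFLA_XDP contents when both a generic (skb-mode) and a native
 * (driver-mode) program are attached; illustrative values.
 *
 *	IFLA_XDP
 *	  IFLA_XDP_SKB_PROG_ID	17
 *	  IFLA_XDP_DRV_PROG_ID	42
 *	  IFLA_XDP_ATTACHED	XDP_ATTACHED_MULTI
 *
 * With a single program, IFLA_XDP_ATTACHED carries that program's mode and
 * IFLA_XDP_PROG_ID duplicates the id for older consumers.
 */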

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER,
				  READ_ONCE(upper_dev->ifindex));

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int iflink = dev_get_iflink(dev);

	if (force || READ_ONCE(dev->ifindex) != iflink)
		return nla_put_u32(skb, IFLA_LINK, iflink);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * The fill_link_af() callback may return -ENODATA to
		 * indicate that there was no data to be dumped. This is
		 * not an error; it means we should trim the attribute
		 * header and continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}
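
/* Example: an af_ops->fill_link_af() implementation signalling "nothing to
 * dump" so the per-family nest is trimmed. A hypothetical sketch; the
 * foo_* helpers and IFLA_FOO_SETTING are not real.
 *
 *	static int foo_fill_link_af(struct sk_buff *skb,
 *				    const struct net_device *dev,
 *				    u32 ext_filter_mask)
 *	{
 *		if (!foo_has_link_data(dev))
 *			return -ENODATA;
 *		return nla_put_u32(skb, IFLA_FOO_SETTING, foo_get_setting(dev));
 *	}
 */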

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
		goto nla_put_failure;

	preason = READ_ONCE(dev->proto_down_reason);
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_dpll_pin(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct nlattr *dpll_pin_nest;
	int ret;

	dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
	if (!dpll_pin_nest)
		return -EMSGSIZE;

	ret = dpll_netdev_add_pin_handle(skb, dev);
	if (ret < 0)
		goto nest_cancel;

	nla_nest_end(skb, dpll_pin_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, dpll_pin_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	char devname[IFNAMSIZ];
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = READ_ONCE(dev->type);
	ifm->ifi_index = READ_ONCE(dev->ifindex);
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	netdev_copy_name(dev, devname);
	if (nla_put_string(skb, IFLA_IFNAME, devname))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? READ_ONCE(dev->operstate) :
					    IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
	    nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
	    nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
			READ_ONCE(dev->num_tx_queues)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
			READ_ONCE(dev->gso_max_segs)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
			READ_ONCE(dev->gso_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
			READ_ONCE(dev->gro_max_size)) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gso_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gro_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
			READ_ONCE(dev->tso_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
			READ_ONCE(dev->tso_max_segs)) ||
	    nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON,
			 READ_ONCE(dev->max_pacing_offload_horizon)) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
			READ_ONCE(dev->num_rx_queues)) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
		goto nla_put_failure_rcu;
	qdisc = rcu_dereference(dev->qdisc);
	if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
		goto nla_put_failure_rcu;
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure_rcu;
	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_dpll_pin(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_UNSPEC]		= { .strict_start_type = IFLA_DPLL_PIN },
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
	[IFLA_GSO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
};
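
/* Example: how a doit handler typically consumes ifla_policy; a sketch of
 * the parse step shared by the link handlers in this file.
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
 *				     IFLA_MAX, ifla_policy, extack);
 *	if (err < 0)
 *		return err;
 */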
type = NLA_NESTED }, 2002 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 2003 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 2004 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 2005 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 2006 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 2007 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 2008 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 2009 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 2010 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, 2011 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 2012 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 2013 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 2014 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 2015 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 2016 [IFLA_XDP] = { .type = NLA_NESTED }, 2017 [IFLA_EVENT] = { .type = NLA_U32 }, 2018 [IFLA_GROUP] = { .type = NLA_U32 }, 2019 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 2020 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 2021 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 2022 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 2023 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 2024 [IFLA_PROP_LIST] = { .type = NLA_NESTED }, 2025 [IFLA_ALT_IFNAME] = { .type = NLA_STRING, 2026 .len = ALTIFNAMSIZ - 1 }, 2027 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, 2028 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, 2029 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 2030 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, 2031 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, 2032 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, 2033 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, 2034 [IFLA_ALLMULTI] = { .type = NLA_REJECT }, 2035 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2036 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2037 }; 2038 2039 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 2040 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 2041 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 2042 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 2043 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 2044 }; 2045 2046 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 2047 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 2048 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 2049 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 2050 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 2051 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 2052 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 2053 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 2054 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 2055 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 2056 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2057 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2058 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2059 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2060 }; 2061 2062 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2063 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2064 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2065 .len = PORT_PROFILE_MAX }, 2066 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2067 .len = PORT_UUID_MAX }, 2068 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2069 .len = PORT_UUID_MAX }, 2070 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2071 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2072 2073 /* Unused, but we need to keep it here since user space could 2074 * fill it. 
It's also broken with regard to NLA_BINARY use in 2075 * combination with structs. 2076 */ 2077 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2078 .len = sizeof(struct ifla_port_vsi) }, 2079 }; 2080 2081 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2082 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2083 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2084 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2085 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2086 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2087 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2088 }; 2089 2090 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2091 { 2092 const struct rtnl_link_ops *ops = NULL; 2093 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2094 2095 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2096 return NULL; 2097 2098 if (linfo[IFLA_INFO_KIND]) { 2099 char kind[MODULE_NAME_LEN]; 2100 2101 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2102 ops = rtnl_link_ops_get(kind); 2103 } 2104 2105 return ops; 2106 } 2107 2108 static bool link_master_filtered(struct net_device *dev, int master_idx) 2109 { 2110 struct net_device *master; 2111 2112 if (!master_idx) 2113 return false; 2114 2115 master = netdev_master_upper_dev_get(dev); 2116 2117 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2118 * another invalid value for ifindex to denote "no master". 2119 */ 2120 if (master_idx == -1) 2121 return !!master; 2122 2123 if (!master || master->ifindex != master_idx) 2124 return true; 2125 2126 return false; 2127 } 2128 2129 static bool link_kind_filtered(const struct net_device *dev, 2130 const struct rtnl_link_ops *kind_ops) 2131 { 2132 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2133 return true; 2134 2135 return false; 2136 } 2137 2138 static bool link_dump_filtered(struct net_device *dev, 2139 int master_idx, 2140 const struct rtnl_link_ops *kind_ops) 2141 { 2142 if (link_master_filtered(dev, master_idx) || 2143 link_kind_filtered(dev, kind_ops)) 2144 return true; 2145 2146 return false; 2147 } 2148 2149 /** 2150 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2151 * @sk: netlink socket 2152 * @netnsid: network namespace identifier 2153 * 2154 * Returns the network namespace identified by netnsid on success or an error 2155 * pointer on failure. 2156 */ 2157 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2158 { 2159 struct net *net; 2160 2161 net = get_net_ns_by_id(sock_net(sk), netnsid); 2162 if (!net) 2163 return ERR_PTR(-EINVAL); 2164 2165 /* For now, the caller is required to have CAP_NET_ADMIN in 2166 * the user namespace owning the target net ns. 
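	 *
	 * A typical caller resolves an IFLA_TARGET_NETNSID attribute and
	 * drops the reference when done, e.g. (illustrative sketch, tb[]
	 * assumed parsed against ifla_policy):
	 *
	 *	tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk,
	 *					  nla_get_s32(tb[IFLA_TARGET_NETNSID]));
	 *	if (IS_ERR(tgt_net))
	 *		return PTR_ERR(tgt_net);
	 *	...
	 *	put_net(tgt_net);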
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}
EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);

static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
				      bool strict_check, struct nlattr **tb,
				      struct netlink_ext_ack *extack)
{
	int hdrlen;

	if (strict_check) {
		struct ifinfomsg *ifm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
			return -EINVAL;
		}

		ifm = nlmsg_data(nlh);
		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
		    ifm->ifi_change) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
			return -EINVAL;
		}
		if (ifm->ifi_index) {
			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
			return -EINVAL;
		}

		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
						     IFLA_MAX, ifla_policy,
						     extack);
	}

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
				      extack);
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct rtnl_link_ops *kind_ops = NULL;
	struct netlink_ext_ack *extack = cb->extack;
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct nlattr *tb[IFLA_MAX+1];
	struct {
		unsigned long ifindex;
	} *ctx = (void *)cb->ctx;
	struct net *tgt_net = net;
	u32 ext_filter_mask = 0;
	struct net_device *dev;
	int master_idx = 0;
	int netnsid = -1;
	int err, i;

	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
	if (err < 0) {
		if (cb->strict_check)
			return err;

		goto walk_entries;
	}

	for (i = 0; i <= IFLA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* new attributes should only be added with strict checking */
		switch (i) {
		case IFLA_TARGET_NETNSID:
			netnsid = nla_get_s32(tb[i]);
			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
			if (IS_ERR(tgt_net)) {
				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
				return PTR_ERR(tgt_net);
			}
			break;
		case IFLA_EXT_MASK:
			ext_filter_mask = nla_get_u32(tb[i]);
			break;
		case IFLA_MASTER:
			master_idx = nla_get_u32(tb[i]);
			break;
		case IFLA_LINKINFO:
			kind_ops = linkinfo_to_kind_ops(tb[i]);
			break;
		default:
			if (cb->strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
				return -EINVAL;
			}
		}
	}

	if (master_idx || kind_ops)
		flags |= NLM_F_DUMP_FILTERED;

walk_entries:
	err = 0;
	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
		if (link_dump_filtered(dev, master_idx, kind_ops))
			continue;
		err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
				       NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, 0, flags,
				       ext_filter_mask, 0, NULL, 0,
				       netnsid, GFP_KERNEL);
		if (err < 0)
			break;
	}
	cb->seq = tgt_net->dev_base_seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
			     struct netlink_ext_ack *exterr)
{
	const struct ifinfomsg *ifmp;
	const struct nlattr *attrs;
	size_t len;

	ifmp = nla_data(nla_peer);
	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);

	if (ifmp->ifi_index < 0) {
		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
				    "ifindex can't be negative");
		return -EINVAL;
	}

	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
				    exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

/* Figure out which network namespace we are talking about by
 * examining the link attributes in the following order:
 *
 * 1. IFLA_NET_NS_PID
 * 2. IFLA_NET_NS_FD
 * 3. IFLA_TARGET_NETNSID
 */
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
					       struct nlattr *tb[])
{
	struct net *net;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
		return rtnl_link_get_net(src_net, tb);

	if (!tb[IFLA_TARGET_NETNSID])
		return get_net(src_net);

	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
	if (!net)
		return ERR_PTR(-EINVAL);

	return net;
}

static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
					     struct net *src_net,
					     struct nlattr *tb[], int cap)
{
	struct net *net;

	net = rtnl_link_get_net_by_nlattr(src_net, tb);
	if (IS_ERR(net))
		return net;

	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
		put_net(net);
		return ERR_PTR(-EPERM);
	}

	return net;
}

/* Verify that rtnetlink requests do not pass additional properties
 * potentially referring to different network namespaces.
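 *
 * With @netns_id_only set, the process-based selectors are refused
 * outright; otherwise at most one of IFLA_TARGET_NETNSID,
 * IFLA_NET_NS_PID and IFLA_NET_NS_FD may be present.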
 */
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
				    struct netlink_ext_ack *extack,
				    bool netns_id_only)
{
	if (netns_id_only) {
		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
			return 0;

		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
		return -EOPNOTSUPP;
	}

	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
		goto invalid_attr;

	return 0;

invalid_attr:
	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
	return -EINVAL;
}

static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			    int max_tx_rate)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_set_vf_rate)
		return -EOPNOTSUPP;
	if (max_tx_rate && max_tx_rate < min_tx_rate)
		return -EINVAL;

	return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
}

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
			    struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] &&
	    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
		return -EINVAL;

	if (tb[IFLA_BROADCAST] &&
	    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
		return -EINVAL;

	if (tb[IFLA_GSO_MAX_SIZE] &&
	    nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
		NL_SET_ERR_MSG(extack, "too big gso_max_size");
		return -EINVAL;
	}

	if (tb[IFLA_GSO_MAX_SEGS] &&
	    (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
	     nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
		NL_SET_ERR_MSG(extack, "too big gso_max_segs");
		return -EINVAL;
	}

	if (tb[IFLA_GRO_MAX_SIZE] &&
	    nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
		NL_SET_ERR_MSG(extack, "too big gro_max_size");
		return -EINVAL;
	}

	if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
	    nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
		NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
		return -EINVAL;
	}

	if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
	    nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
		NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
		return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			af_ops = rtnl_af_lookup(nla_type(af));
			if (!af_ops)
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)
				return -EOPNOTSUPP;

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af, extack);
				if (err < 0)
					return err;
			}
		}
	}

	return 0;
}

static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}

static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		if (ivm->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		if (ivv->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		if (ivvl[0]->vf >= INT_MAX)
			return -EINVAL;
		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = rtnl_set_vf_rate(dev, ivt->vf,
				       ivf.min_tx_rate, ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;

		err = rtnl_set_vf_rate(dev, ivt->vf,
				       ivt->min_tx_rate, ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		if (ivs->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		if (ivl->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ivrssq_en->vf >= INT_MAX)
			return -EINVAL;
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;
		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}

static int do_set_master(struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev, extack);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
};

static int do_set_proto_down(struct net_device *dev,
			     struct nlattr *nl_proto_down,
			     struct nlattr *nl_proto_down_reason,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
	unsigned long mask = 0;
	u32 value;
	bool proto_down;
	int err;

	if (!dev->change_proto_down) {
		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
		return -EOPNOTSUPP;
	}

	if (nl_proto_down_reason) {
		err = nla_parse_nested_deprecated(pdreason,
						  IFLA_PROTO_DOWN_REASON_MAX,
						  nl_proto_down_reason,
						  ifla_proto_down_reason_policy,
						  NULL);
		if (err < 0)
			return err;

		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
			return -EINVAL;
		}

		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);

		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);

		dev_change_proto_down_reason(dev, mask, value);
	}

	if (nl_proto_down) {
		proto_down = nla_get_u8(nl_proto_down);

		/* Don't turn off protodown if there are active reasons */
		if (!proto_down && dev->proto_down_reason) {
			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
			return -EBUSY;
		}
		err = dev_change_proto_down(dev,
					    proto_down);
		if (err)
			return err;
	}

	return 0;
}

#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified.
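 *
 * DO_SETLINK_NOTIFY includes the DO_SETLINK_MODIFIED bit, so the errout
 * path in do_setlink() can test
 * (status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY to tell changes that
 * warrant a netdev_state_change() notification from ones that were
 * merely committed.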
 */
#define DO_SETLINK_NOTIFY	0x03
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	char ifname[IFNAMSIZ];
	int err;

	if (tb[IFLA_IFNAME])
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
		const char *pat = ifname[0] ? ifname : NULL;
		struct net *net;
		int new_ifindex;

		net = rtnl_link_get_net_capable(skb, dev_net(dev),
						tb, CAP_NET_ADMIN);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}

		if (tb[IFLA_NEW_IFINDEX])
			new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
		else
			new_ifindex = 0;

		err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address_user(dev, sa, extack);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
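	 * For example, `ip link set dev eth0 name lan0` typically carries
	 * the ifindex of eth0 plus IFLA_IFNAME "lan0" (illustrative
	 * command).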
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
				       extack);
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);

		err = dev_change_tx_queue_len(dev, value);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GSO_MAX_SIZE]) {
		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);

		if (dev->gso_max_size ^ max_size) {
			netif_set_gso_max_size(dev, max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GSO_MAX_SEGS]) {
		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);

		if (dev->gso_max_segs ^ max_segs) {
			netif_set_gso_max_segs(dev, max_segs);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GRO_MAX_SIZE]) {
		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);

		if (dev->gro_max_size ^ gro_max_size) {
			netif_set_gro_max_size(dev, gro_max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
		u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);

		if (dev->gso_ipv4_max_size ^ max_size) {
			netif_set_gso_ipv4_max_size(dev, max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);

		if (dev->gro_ipv4_max_size ^ gro_max_size) {
			netif_set_gro_ipv4_max_size(dev, gro_max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		WRITE_ONCE(dev->link_mode, value);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
							  attr,
							  ifla_vf_policy,
							  NULL);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

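		/* Each IFLA_VF_PORT nest below must carry IFLA_PORT_VF so
		 * the driver knows which VF the port profile applies to;
		 * nests without it are rejected before ndo_set_vf_port()
		 * is called.
		 */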
		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
							  attr,
							  ifla_port_policy,
							  NULL);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
						  tb[IFLA_PORT_SELF],
						  ifla_port_policy, NULL);
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));

			err = af_ops->set_link_af(dev, af, extack);
			if (err < 0)
				goto errout;

			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
					tb[IFLA_PROTO_DOWN_REASON], extack);
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
		u32 xdp_flags = 0;

		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
						  tb[IFLA_XDP],
						  ifla_xdp_policy, NULL);
		if (err < 0)
			goto errout;

		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
			err = -EINVAL;
			goto errout;
		}

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
				err = -EINVAL;
				goto errout;
			}
		}

		if (xdp[IFLA_XDP_FD]) {
			int expected_fd = -1;

			if (xdp_flags & XDP_FLAGS_REPLACE) {
				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
					err = -EINVAL;
					goto errout;
				}
				expected_fd =
					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
			}

			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
						expected_fd,
						xdp_flags);
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}

static struct net_device *rtnl_dev_get(struct net *net,
				       struct nlattr *tb[])
{
	char ifname[ALTIFNAMSIZ];

	if (tb[IFLA_IFNAME])
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else if (tb[IFLA_ALT_IFNAME])
		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
	else
		return NULL;

	return __dev_get_by_name(net, ifname);
}

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		goto errout;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, extack, tb, 0);
errout:
	return err;
}

static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}

int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many_notify(&list_kill, portid, nlh);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	int err;
	int netnsid = -1;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(tgt_net, tb);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;

		goto out;
	}

	err = rtnl_delete_link(dev, portid, nlh);

out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
			u32 portid, const struct nlmsghdr *nlh)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
					 NULL);
		if (err < 0)
			return err;
	}

	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
	} else {
		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
	}
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);

struct net_device *rtnl_create_link(struct net *net, const char *ifname,
				    unsigned char name_assign_type,
				    const struct rtnl_link_ops *ops,
				    struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;
	int err;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	if (num_tx_queues < 1 || num_tx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
		return ERR_PTR(-EINVAL);
	}

	if (num_rx_queues < 1 || num_rx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
		return ERR_PTR(-EINVAL);
	}

	if (ops->alloc) {
		dev = ops->alloc(tb, ifname, name_assign_type,
				 num_tx_queues, num_rx_queues);
		if (IS_ERR(dev))
			return dev;
	} else {
		dev = alloc_netdev_mqs(ops->priv_size, ifname,
				       name_assign_type, ops->setup,
				       num_tx_queues, num_rx_queues);
	}

	if (!dev)
		return ERR_PTR(-ENOMEM);

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU]) {
		u32 mtu = nla_get_u32(tb[IFLA_MTU]);

		err = dev_validate_mtu(dev, mtu, extack);
		if (err) {
			free_netdev(dev);
			return ERR_PTR(err);
		}
		dev->mtu = mtu;
	}
	if (tb[IFLA_ADDRESS]) {
		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
			       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
	if (tb[IFLA_GSO_MAX_SIZE])
		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
	if (tb[IFLA_GSO_MAX_SEGS])
		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
	if (tb[IFLA_GRO_MAX_SIZE])
		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
	if (tb[IFLA_GSO_IPV4_MAX_SIZE])
		netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
	if (tb[IFLA_GRO_IPV4_MAX_SIZE])
		netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);

static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct net *net, int group,
				 struct ifinfomsg *ifm,
				 struct netlink_ext_ack *extack,
				 struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = validate_linkmsg(dev, tb, extack);
			if (err < 0)
				return err;
			err = do_setlink(skb, dev, ifm, extack, tb, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
			       const struct rtnl_link_ops *ops,
			       const struct nlmsghdr *nlh,
			       struct nlattr **tb, struct nlattr **data,
			       struct netlink_ext_ack *extack)
{
	unsigned char name_assign_type = NET_NAME_USER;
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *dest_net, *link_net;
	struct net_device *dev;
	char ifname[IFNAMSIZ];
	int err;

	if (!ops->alloc && !ops->setup)
		return -EOPNOTSUPP;

	if (tb[IFLA_IFNAME]) {
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	} else {
		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
		name_assign_type = NET_NAME_ENUM;
	}

	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
	if (IS_ERR(dest_net))
		return PTR_ERR(dest_net);

	if (tb[IFLA_LINK_NETNSID]) {
		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

		link_net = get_net_ns_by_id(dest_net, id);
		if (!link_net) {
			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
			err = -EINVAL;
			goto out;
		}
		err = -EPERM;
		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
			goto out;
	} else {
		link_net = NULL;
	}

	dev = rtnl_create_link(link_net ? : dest_net, ifname,
			       name_assign_type, ops, tb, extack);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out;
	}

	dev->ifindex = ifm->ifi_index;

	if (ops->newlink)
		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
	else
		err = register_netdevice(dev);
	if (err < 0) {
		free_netdev(dev);
		goto out;
	}

	err = rtnl_configure_link(dev, ifm, portid, nlh);
	if (err < 0)
		goto out_unregister;
	if (link_net) {
		err = dev_change_net_namespace(dev, dest_net, ifname);
		if (err < 0)
			goto out_unregister;
	}
	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto out_unregister;
	}
out:
	if (link_net)
		put_net(link_net);
	put_net(dest_net);
	return err;
out_unregister:
	if (ops->newlink) {
		LIST_HEAD(list_kill);

		ops->dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
	} else {
		unregister_netdevice(dev);
	}
	goto out;
}

struct rtnl_newlink_tbs {
	struct nlattr *tb[IFLA_MAX + 1];
	struct nlattr *attr[RTNL_MAX_TYPE + 1];
	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
};

static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct rtnl_newlink_tbs *tbs,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
	struct nlattr ** const tb = tbs->tb;
	const struct rtnl_link_ops *m_ops;
	struct net_device *master_dev;
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	struct nlattr **slave_data;
	char kind[MODULE_NAME_LEN];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	struct nlattr **data;
	bool link_specified;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0) {
		link_specified = true;
		dev = __dev_get_by_index(net, ifm->ifi_index);
	} else if (ifm->ifi_index < 0) {
		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
		return -EINVAL;
	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
		link_specified = true;
		dev = rtnl_dev_get(net, tb);
	} else {
		link_specified = false;
		dev = NULL;
	}

	master_dev = NULL;
	m_ops = NULL;
	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
						  tb[IFLA_LINKINFO],
						  ifla_info_policy, NULL);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	data = NULL;
	if (ops) {
		if (ops->maxtype > RTNL_MAX_TYPE)
			return -EINVAL;

		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
			err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
							  linkinfo[IFLA_INFO_DATA],
							  ops->policy, extack);
			if (err < 0)
				return err;
			data = tbs->attr;
		}
		if (ops->validate) {
			err = ops->validate(tb, data, extack);
			if (err < 0)
				return err;
		}
	}

	slave_data = NULL;
	if (m_ops) {
		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
			return -EINVAL;

		if (m_ops->slave_maxtype &&
		    linkinfo[IFLA_INFO_SLAVE_DATA]) {
			err = nla_parse_nested_deprecated(tbs->slave_attr,
							  m_ops->slave_maxtype,
							  linkinfo[IFLA_INFO_SLAVE_DATA],
							  m_ops->slave_policy,
							  extack);
			if (err < 0)
				return err;
			slave_data = tbs->slave_attr;
		}
	}

	if (dev) {
		int status = 0;

		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		err = validate_linkmsg(dev, tb, extack);
		if (err < 0)
			return err;

		if (linkinfo[IFLA_INFO_DATA]) {
			if (!ops || ops != dev->rtnl_link_ops ||
			    !ops->changelink)
				return -EOPNOTSUPP;

			err = ops->changelink(dev, tb, data, extack);
			if (err < 0)
				return err;
			status |= DO_SETLINK_NOTIFY;
		}

		if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
			if (!m_ops || !m_ops->slave_changelink)
				return -EOPNOTSUPP;

			err = m_ops->slave_changelink(master_dev, dev, tb,
						      slave_data, extack);
			if (err < 0)
				return err;
			status |= DO_SETLINK_NOTIFY;
		}

		return do_setlink(skb, dev, ifm, extack, tb, status);
	}

	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
		/* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
		 * or it's for a group
		 */
		if (link_specified)
			return -ENODEV;
		if (tb[IFLA_GROUP])
			return rtnl_group_changelink(skb, net,
						     nla_get_u32(tb[IFLA_GROUP]),
						     ifm, extack, tb);
		return -ENODEV;
	}

	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
		return -EOPNOTSUPP;

	if (!ops) {
#ifdef CONFIG_MODULES
		if (kind[0]) {
			__rtnl_unlock();
			request_module("rtnl-link-%s", kind);
			rtnl_lock();
			ops = rtnl_link_ops_get(kind);
			if (ops)
				goto replay;
		}
#endif
		NL_SET_ERR_MSG(extack, "Unknown device type");
		return -EOPNOTSUPP;
	}

	return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
}

static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct rtnl_newlink_tbs *tbs;
	int ret;

	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
	if (!tbs)
		return -ENOMEM;

	ret = __rtnl_newlink(skb, nlh, tbs, extack);
	kfree(tbs);
	return ret;
}

static int rtnl_valid_getlink_req(struct sk_buff *skb,
				  const struct nlmsghdr *nlh,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for get link");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
					      ifla_policy, extack);

	ifm = nlmsg_data(nlh);
	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
					    ifla_policy, extack);
	if (err)
		return err;

	for (i = 0; i <= IFLA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case IFLA_IFNAME:
		case IFLA_ALT_IFNAME:
		case IFLA_EXT_MASK:
		case IFLA_TARGET_NETNSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int netnsid = -1;
	int err;
	u32 ext_filter_mask = 0;

	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(tgt_net, tb);
	else
		goto out;

	err = -ENODEV;
	if (dev == NULL)
		goto out;

	err = -ENOBUFS;
	nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
	if (nskb == NULL)
		goto out;

	/* Synchronize the carrier state so we don't report a state
	 * that we're not actually going to honour immediately; if
	 * the driver just did a carrier off->on transition, we can
	 * only TX if link watch work has run, but without this we'd
	 * already report carrier on, even if it doesn't work yet.
	 */
	linkwatch_sync_dev(dev);

	err = rtnl_fill_ifinfo(nskb, dev, net,
			       RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
			       0, NULL, 0, netnsid, GFP_KERNEL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
			   bool *changed, struct netlink_ext_ack *extack)
{
	char *alt_ifname;
	size_t size;
	int err;

	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	if (cmd == RTM_NEWLINKPROP) {
		size = rtnl_prop_list_size(dev);
		size += nla_total_size(ALTIFNAMSIZ);
		if (size >= U16_MAX) {
			NL_SET_ERR_MSG(extack,
				       "effective property list too long");
			return -EINVAL;
		}
	}

	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
	if (!alt_ifname)
		return -ENOMEM;

	if (cmd == RTM_NEWLINKPROP) {
		err = netdev_name_node_alt_create(dev, alt_ifname);
		if (!err)
			alt_ifname = NULL;
	} else if (cmd == RTM_DELLINKPROP) {
		err = netdev_name_node_alt_destroy(dev, alt_ifname);
	} else {
		WARN_ON_ONCE(1);
		err = -EINVAL;
	}

	kfree(alt_ifname);
	if (!err)
		*changed = true;
	return err;
}

static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	bool changed = false;
	struct nlattr *attr;
	int err, rem;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!tb[IFLA_PROP_LIST])
		return 0;

	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
		switch (nla_type(attr)) {
		case IFLA_ALT_IFNAME:
			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
			if (err)
				return err;
			break;
		}
	}

	if (changed)
		netdev_state_change(dev);
	return 0;
}

static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
}

static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}

static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
					  struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	size_t min_ifinfo_dump_size = 0;
	u32 ext_filter_mask = 0;
	struct net_device *dev;
	struct nlattr *nla;
	int hdrlen, rem;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return NLMSG_GOODSIZE;

	nla_for_each_attr_type(nla, IFLA_EXT_MASK,
			       nlmsg_attrdata(nlh, hdrlen),
			       nlmsg_attrlen(nlh, hdrlen), rem) {
		if (nla_len(nla) == sizeof(u32))
			ext_filter_mask = nla_get_u32(nla);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
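	 * ("minimum" meaning the smallest buffer that is still guaranteed
	 * to hold the largest single reply, hence the max() below).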
4005 */ 4006 rcu_read_lock(); 4007 for_each_netdev_rcu(net, dev) { 4008 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 4009 if_nlmsg_size(dev, ext_filter_mask)); 4010 } 4011 rcu_read_unlock(); 4012 4013 return nlmsg_total_size(min_ifinfo_dump_size); 4014 } 4015 4016 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 4017 { 4018 int idx; 4019 int s_idx = cb->family; 4020 int type = cb->nlh->nlmsg_type - RTM_BASE; 4021 int ret = 0; 4022 4023 if (s_idx == 0) 4024 s_idx = 1; 4025 4026 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 4027 struct rtnl_link __rcu **tab; 4028 struct rtnl_link *link; 4029 rtnl_dumpit_func dumpit; 4030 4031 if (idx < s_idx || idx == PF_PACKET) 4032 continue; 4033 4034 if (type < 0 || type >= RTM_NR_MSGTYPES) 4035 continue; 4036 4037 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 4038 if (!tab) 4039 continue; 4040 4041 link = rcu_dereference_rtnl(tab[type]); 4042 if (!link) 4043 continue; 4044 4045 dumpit = link->dumpit; 4046 if (!dumpit) 4047 continue; 4048 4049 if (idx > s_idx) { 4050 memset(&cb->args[0], 0, sizeof(cb->args)); 4051 cb->prev_seq = 0; 4052 cb->seq = 0; 4053 } 4054 ret = dumpit(skb, cb); 4055 if (ret) 4056 break; 4057 } 4058 cb->family = idx; 4059 4060 return skb->len ? : ret; 4061 } 4062 4063 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4064 unsigned int change, 4065 u32 event, gfp_t flags, int *new_nsid, 4066 int new_ifindex, u32 portid, 4067 const struct nlmsghdr *nlh) 4068 { 4069 struct net *net = dev_net(dev); 4070 struct sk_buff *skb; 4071 int err = -ENOBUFS; 4072 u32 seq = 0; 4073 4074 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4075 if (skb == NULL) 4076 goto errout; 4077 4078 if (nlmsg_report(nlh)) 4079 seq = nlmsg_seq(nlh); 4080 else 4081 portid = 0; 4082 4083 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4084 type, portid, seq, change, 0, 0, event, 4085 new_nsid, new_ifindex, -1, flags); 4086 if (err < 0) { 4087 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4088 WARN_ON(err == -EMSGSIZE); 4089 kfree_skb(skb); 4090 goto errout; 4091 } 4092 return skb; 4093 errout: 4094 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4095 return NULL; 4096 } 4097 4098 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4099 u32 portid, const struct nlmsghdr *nlh) 4100 { 4101 struct net *net = dev_net(dev); 4102 4103 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4104 } 4105 4106 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4107 unsigned int change, u32 event, 4108 gfp_t flags, int *new_nsid, int new_ifindex, 4109 u32 portid, const struct nlmsghdr *nlh) 4110 { 4111 struct sk_buff *skb; 4112 4113 if (dev->reg_state != NETREG_REGISTERED) 4114 return; 4115 4116 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4117 new_ifindex, portid, nlh); 4118 if (skb) 4119 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4120 } 4121 4122 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4123 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4124 { 4125 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4126 NULL, 0, portid, nlh); 4127 } 4128 4129 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4130 gfp_t flags, int *new_nsid, int new_ifindex) 4131 { 4132 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4133 new_nsid, new_ifindex, 0, NULL); 4134 } 4135 4136 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4137 struct net_device *dev, 4138 u8 *addr, u16 
vid, u32 pid, u32 seq,
4139 int type, unsigned int flags,
4140 int nlflags, u16 ndm_state)
4141 {
4142 struct nlmsghdr *nlh;
4143 struct ndmsg *ndm;
4144
4145 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4146 if (!nlh)
4147 return -EMSGSIZE;
4148
4149 ndm = nlmsg_data(nlh);
4150 ndm->ndm_family = AF_BRIDGE;
4151 ndm->ndm_pad1 = 0;
4152 ndm->ndm_pad2 = 0;
4153 ndm->ndm_flags = flags;
4154 ndm->ndm_type = 0;
4155 ndm->ndm_ifindex = dev->ifindex;
4156 ndm->ndm_state = ndm_state;
4157
4158 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4159 goto nla_put_failure;
4160 if (vid)
4161 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4162 goto nla_put_failure;
4163
4164 nlmsg_end(skb, nlh);
4165 return 0;
4166
4167 nla_put_failure:
4168 nlmsg_cancel(skb, nlh);
4169 return -EMSGSIZE;
4170 }
4171
4172 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4173 {
4174 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4175 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4176 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4177 0;
4178 }
4179
4180 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4181 u16 ndm_state)
4182 {
4183 struct net *net = dev_net(dev);
4184 struct sk_buff *skb;
4185 int err = -ENOBUFS;
4186
4187 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4188 if (!skb)
4189 goto errout;
4190
4191 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4192 0, 0, type, NTF_SELF, 0, ndm_state);
4193 if (err < 0) {
4194 kfree_skb(skb);
4195 goto errout;
4196 }
4197
4198 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4199 return;
4200 errout:
4201 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4202 }
4203
4204 /*
4205 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4206 */
4207 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4208 struct nlattr *tb[],
4209 struct net_device *dev,
4210 const unsigned char *addr, u16 vid,
4211 u16 flags)
4212 {
4213 int err = -EINVAL;
4214
4215 /* If aging addresses are supported, the device will need to
* implement its own handler for this.
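*
* As an illustration (iproute2 syntax, one possible caller rather than
* the only path into this function): a request such as
*
*   bridge fdb add 00:11:22:33:44:55 dev eth0 self permanent
*
* carries NUD_PERMANENT and passes the check below, whereas a
* "dynamic" entry carries a different ndm_state and is rejected.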
4217 */
4218 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4219 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4220 return err;
4221 }
4222
4223 if (tb[NDA_FLAGS_EXT]) {
4224 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4225 return err;
4226 }
4227
4228 if (vid) {
4229 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4230 return err;
4231 }
4232
4233 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4234 err = dev_uc_add_excl(dev, addr);
4235 else if (is_multicast_ether_addr(addr))
4236 err = dev_mc_add_excl(dev, addr);
4237
4238 /* Only return duplicate errors if NLM_F_EXCL is set */
4239 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4240 err = 0;
4241
4242 return err;
4243 }
4244 EXPORT_SYMBOL(ndo_dflt_fdb_add);
4245
4246 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4247 struct netlink_ext_ack *extack)
4248 {
4249 u16 vid = 0;
4250
4251 if (vlan_attr) {
4252 if (nla_len(vlan_attr) != sizeof(u16)) {
4253 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4254 return -EINVAL;
4255 }
4256
4257 vid = nla_get_u16(vlan_attr);
4258
4259 if (!vid || vid >= VLAN_VID_MASK) {
4260 NL_SET_ERR_MSG(extack, "invalid vlan id");
4261 return -EINVAL;
4262 }
4263 }
4264 *p_vid = vid;
4265 return 0;
4266 }
4267
4268 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4269 struct netlink_ext_ack *extack)
4270 {
4271 struct net *net = sock_net(skb->sk);
4272 struct ndmsg *ndm;
4273 struct nlattr *tb[NDA_MAX+1];
4274 struct net_device *dev;
4275 u8 *addr;
4276 u16 vid;
4277 int err;
4278
4279 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4280 extack);
4281 if (err < 0)
4282 return err;
4283
4284 ndm = nlmsg_data(nlh);
4285 if (ndm->ndm_ifindex == 0) {
4286 NL_SET_ERR_MSG(extack, "invalid ifindex");
4287 return -EINVAL;
4288 }
4289
4290 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4291 if (dev == NULL) {
4292 NL_SET_ERR_MSG(extack, "unknown ifindex");
4293 return -ENODEV;
4294 }
4295
4296 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4297 NL_SET_ERR_MSG(extack, "invalid address");
4298 return -EINVAL;
4299 }
4300
4301 if (dev->type != ARPHRD_ETHER) {
4302 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4303 return -EINVAL;
4304 }
4305
4306 addr = nla_data(tb[NDA_LLADDR]);
4307
4308 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4309 if (err)
4310 return err;
4311
4312 err = -EOPNOTSUPP;
4313
4314 /* Support fdb on master device, the net/bridge default case */
4315 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4316 netif_is_bridge_port(dev)) {
4317 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4318 const struct net_device_ops *ops = br_dev->netdev_ops;
4319
4320 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4321 nlh->nlmsg_flags, extack);
4322 if (err)
4323 goto out;
4324 else
4325 ndm->ndm_flags &= ~NTF_MASTER;
4326 }
4327
4328 /* Embedded bridge, macvlan, and any other device support */
4329 if ((ndm->ndm_flags & NTF_SELF)) {
4330 if (dev->netdev_ops->ndo_fdb_add)
4331 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4332 vid,
4333 nlh->nlmsg_flags,
4334 extack);
4335 else
4336 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4337 nlh->nlmsg_flags);
4338
4339 if (!err) {
4340 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4341 ndm->ndm_state);
4342 ndm->ndm_flags &= ~NTF_SELF;
4343 }
4344 }
4345 out:
4346 return err;
4347 }
4348
4349 /*
4350 * ndo_dflt_fdb_del - default
netdevice operation to delete an FDB entry
4351 */
4352 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4353 struct nlattr *tb[],
4354 struct net_device *dev,
4355 const unsigned char *addr, u16 vid)
4356 {
4357 int err = -EINVAL;
4358
4359 /* If aging addresses are supported, the device will need to
4360 * implement its own handler for this.
4361 */
4362 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4363 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4364 return err;
4365 }
4366
4367 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4368 err = dev_uc_del(dev, addr);
4369 else if (is_multicast_ether_addr(addr))
4370 err = dev_mc_del(dev, addr);
4371
4372 return err;
4373 }
4374 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4375
4376 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4377 struct netlink_ext_ack *extack)
4378 {
4379 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4380 struct net *net = sock_net(skb->sk);
4381 const struct net_device_ops *ops;
4382 struct ndmsg *ndm;
4383 struct nlattr *tb[NDA_MAX+1];
4384 struct net_device *dev;
4385 __u8 *addr = NULL;
4386 int err;
4387 u16 vid;
4388
4389 if (!netlink_capable(skb, CAP_NET_ADMIN))
4390 return -EPERM;
4391
4392 if (!del_bulk) {
4393 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4394 NULL, extack);
4395 } else {
4396 /* For bulk delete, the drivers will parse the message with
4397 * their own policy.
4398 */
4399 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
4400 }
4401 if (err < 0)
4402 return err;
4403
4404 ndm = nlmsg_data(nlh);
4405 if (ndm->ndm_ifindex == 0) {
4406 NL_SET_ERR_MSG(extack, "invalid ifindex");
4407 return -EINVAL;
4408 }
4409
4410 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4411 if (dev == NULL) {
4412 NL_SET_ERR_MSG(extack, "unknown ifindex");
4413 return -ENODEV;
4414 }
4415
4416 if (!del_bulk) {
4417 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4418 NL_SET_ERR_MSG(extack, "invalid address");
4419 return -EINVAL;
4420 }
4421 addr = nla_data(tb[NDA_LLADDR]);
4422
4423 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4424 if (err)
4425 return err;
4426 }
4427
4428 if (dev->type != ARPHRD_ETHER) {
4429 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4430 return -EINVAL;
4431 }
4432
4433 err = -EOPNOTSUPP;
4434
4435 /* Support fdb on master device, the net/bridge default case */
4436 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4437 netif_is_bridge_port(dev)) {
4438 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4439
4440 ops = br_dev->netdev_ops;
4441 if (!del_bulk) {
4442 if (ops->ndo_fdb_del)
4443 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4444 } else {
4445 if (ops->ndo_fdb_del_bulk)
4446 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4447 }
4448
4449 if (err)
4450 goto out;
4451 else
4452 ndm->ndm_flags &= ~NTF_MASTER;
4453 }
4454
4455 /* Embedded bridge, macvlan, and any other device support */
4456 if (ndm->ndm_flags & NTF_SELF) {
4457 ops = dev->netdev_ops;
4458 if (!del_bulk) {
4459 if (ops->ndo_fdb_del)
4460 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4461 else
4462 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4463 } else {
4464 /* in case err was cleared by NTF_MASTER call */
4465 err = -EOPNOTSUPP;
4466 if (ops->ndo_fdb_del_bulk)
4467 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4468 }
4469
4470 if (!err) {
4471 if (!del_bulk)
4472 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4473 ndm->ndm_state);
4474 ndm->ndm_flags &= ~NTF_SELF;
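/* As with NTF_MASTER above, the flag is cleared once its handler has
 * run, so a single request carrying both NTF_MASTER and NTF_SELF is
 * forwarded to the bridge and then to the device itself in turn.
 */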
4475 } 4476 } 4477 out: 4478 return err; 4479 } 4480 4481 static int nlmsg_populate_fdb(struct sk_buff *skb, 4482 struct netlink_callback *cb, 4483 struct net_device *dev, 4484 int *idx, 4485 struct netdev_hw_addr_list *list) 4486 { 4487 struct netdev_hw_addr *ha; 4488 int err; 4489 u32 portid, seq; 4490 4491 portid = NETLINK_CB(cb->skb).portid; 4492 seq = cb->nlh->nlmsg_seq; 4493 4494 list_for_each_entry(ha, &list->list, list) { 4495 if (*idx < cb->args[2]) 4496 goto skip; 4497 4498 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4499 portid, seq, 4500 RTM_NEWNEIGH, NTF_SELF, 4501 NLM_F_MULTI, NUD_PERMANENT); 4502 if (err < 0) 4503 return err; 4504 skip: 4505 *idx += 1; 4506 } 4507 return 0; 4508 } 4509 4510 /** 4511 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4512 * @skb: socket buffer to store message in 4513 * @cb: netlink callback 4514 * @dev: netdevice 4515 * @filter_dev: ignored 4516 * @idx: the number of FDB table entries dumped is added to *@idx 4517 * 4518 * Default netdevice operation to dump the existing unicast address list. 4519 * Returns number of addresses from list put in skb. 4520 */ 4521 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4522 struct netlink_callback *cb, 4523 struct net_device *dev, 4524 struct net_device *filter_dev, 4525 int *idx) 4526 { 4527 int err; 4528 4529 if (dev->type != ARPHRD_ETHER) 4530 return -EINVAL; 4531 4532 netif_addr_lock_bh(dev); 4533 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4534 if (err) 4535 goto out; 4536 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4537 out: 4538 netif_addr_unlock_bh(dev); 4539 return err; 4540 } 4541 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4542 4543 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4544 int *br_idx, int *brport_idx, 4545 struct netlink_ext_ack *extack) 4546 { 4547 struct nlattr *tb[NDA_MAX + 1]; 4548 struct ndmsg *ndm; 4549 int err, i; 4550 4551 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4552 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4553 return -EINVAL; 4554 } 4555 4556 ndm = nlmsg_data(nlh); 4557 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4558 ndm->ndm_flags || ndm->ndm_type) { 4559 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4560 return -EINVAL; 4561 } 4562 4563 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4564 NDA_MAX, NULL, extack); 4565 if (err < 0) 4566 return err; 4567 4568 *brport_idx = ndm->ndm_ifindex; 4569 for (i = 0; i <= NDA_MAX; ++i) { 4570 if (!tb[i]) 4571 continue; 4572 4573 switch (i) { 4574 case NDA_IFINDEX: 4575 if (nla_len(tb[i]) != sizeof(u32)) { 4576 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4577 return -EINVAL; 4578 } 4579 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4580 break; 4581 case NDA_MASTER: 4582 if (nla_len(tb[i]) != sizeof(u32)) { 4583 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4584 return -EINVAL; 4585 } 4586 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4587 break; 4588 default: 4589 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4590 return -EINVAL; 4591 } 4592 } 4593 4594 return 0; 4595 } 4596 4597 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4598 int *br_idx, int *brport_idx, 4599 struct netlink_ext_ack *extack) 4600 { 4601 struct nlattr *tb[IFLA_MAX+1]; 4602 int err; 4603 4604 /* A hack to preserve kernel<->userspace interface. 4605 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 
4606 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4607 * So, check for ndmsg with an optional u32 attribute (not used here). 4608 * Fortunately these sizes don't conflict with the size of ifinfomsg 4609 * with an optional attribute. 4610 */ 4611 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4612 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4613 nla_attr_size(sizeof(u32)))) { 4614 struct ifinfomsg *ifm; 4615 4616 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4617 tb, IFLA_MAX, ifla_policy, 4618 extack); 4619 if (err < 0) { 4620 return -EINVAL; 4621 } else if (err == 0) { 4622 if (tb[IFLA_MASTER]) 4623 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4624 } 4625 4626 ifm = nlmsg_data(nlh); 4627 *brport_idx = ifm->ifi_index; 4628 } 4629 return 0; 4630 } 4631 4632 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4633 { 4634 struct net_device *dev; 4635 struct net_device *br_dev = NULL; 4636 const struct net_device_ops *ops = NULL; 4637 const struct net_device_ops *cops = NULL; 4638 struct net *net = sock_net(skb->sk); 4639 struct hlist_head *head; 4640 int brport_idx = 0; 4641 int br_idx = 0; 4642 int h, s_h; 4643 int idx = 0, s_idx; 4644 int err = 0; 4645 int fidx = 0; 4646 4647 if (cb->strict_check) 4648 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4649 cb->extack); 4650 else 4651 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4652 cb->extack); 4653 if (err < 0) 4654 return err; 4655 4656 if (br_idx) { 4657 br_dev = __dev_get_by_index(net, br_idx); 4658 if (!br_dev) 4659 return -ENODEV; 4660 4661 ops = br_dev->netdev_ops; 4662 } 4663 4664 s_h = cb->args[0]; 4665 s_idx = cb->args[1]; 4666 4667 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4668 idx = 0; 4669 head = &net->dev_index_head[h]; 4670 hlist_for_each_entry(dev, head, index_hlist) { 4671 4672 if (brport_idx && (dev->ifindex != brport_idx)) 4673 continue; 4674 4675 if (!br_idx) { /* user did not specify a specific bridge */ 4676 if (netif_is_bridge_port(dev)) { 4677 br_dev = netdev_master_upper_dev_get(dev); 4678 cops = br_dev->netdev_ops; 4679 } 4680 } else { 4681 if (dev != br_dev && 4682 !netif_is_bridge_port(dev)) 4683 continue; 4684 4685 if (br_dev != netdev_master_upper_dev_get(dev) && 4686 !netif_is_bridge_master(dev)) 4687 continue; 4688 cops = ops; 4689 } 4690 4691 if (idx < s_idx) 4692 goto cont; 4693 4694 if (netif_is_bridge_port(dev)) { 4695 if (cops && cops->ndo_fdb_dump) { 4696 err = cops->ndo_fdb_dump(skb, cb, 4697 br_dev, dev, 4698 &fidx); 4699 if (err == -EMSGSIZE) 4700 goto out; 4701 } 4702 } 4703 4704 if (dev->netdev_ops->ndo_fdb_dump) 4705 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4706 dev, NULL, 4707 &fidx); 4708 else 4709 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4710 &fidx); 4711 if (err == -EMSGSIZE) 4712 goto out; 4713 4714 cops = NULL; 4715 4716 /* reset fdb offset to 0 for rest of the interfaces */ 4717 cb->args[2] = 0; 4718 fidx = 0; 4719 cont: 4720 idx++; 4721 } 4722 } 4723 4724 out: 4725 cb->args[0] = h; 4726 cb->args[1] = idx; 4727 cb->args[2] = fidx; 4728 4729 return skb->len; 4730 } 4731 4732 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4733 struct nlattr **tb, u8 *ndm_flags, 4734 int *br_idx, int *brport_idx, u8 **addr, 4735 u16 *vid, struct netlink_ext_ack *extack) 4736 { 4737 struct ndmsg *ndm; 4738 int err, i; 4739 4740 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4741 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4742 return -EINVAL; 4743 } 4744 4745 ndm = 
nlmsg_data(nlh); 4746 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4747 ndm->ndm_type) { 4748 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4749 return -EINVAL; 4750 } 4751 4752 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4753 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4754 return -EINVAL; 4755 } 4756 4757 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4758 NDA_MAX, nda_policy, extack); 4759 if (err < 0) 4760 return err; 4761 4762 *ndm_flags = ndm->ndm_flags; 4763 *brport_idx = ndm->ndm_ifindex; 4764 for (i = 0; i <= NDA_MAX; ++i) { 4765 if (!tb[i]) 4766 continue; 4767 4768 switch (i) { 4769 case NDA_MASTER: 4770 *br_idx = nla_get_u32(tb[i]); 4771 break; 4772 case NDA_LLADDR: 4773 if (nla_len(tb[i]) != ETH_ALEN) { 4774 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4775 return -EINVAL; 4776 } 4777 *addr = nla_data(tb[i]); 4778 break; 4779 case NDA_VLAN: 4780 err = fdb_vid_parse(tb[i], vid, extack); 4781 if (err) 4782 return err; 4783 break; 4784 case NDA_VNI: 4785 break; 4786 default: 4787 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4788 return -EINVAL; 4789 } 4790 } 4791 4792 return 0; 4793 } 4794 4795 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4796 struct netlink_ext_ack *extack) 4797 { 4798 struct net_device *dev = NULL, *br_dev = NULL; 4799 const struct net_device_ops *ops = NULL; 4800 struct net *net = sock_net(in_skb->sk); 4801 struct nlattr *tb[NDA_MAX + 1]; 4802 struct sk_buff *skb; 4803 int brport_idx = 0; 4804 u8 ndm_flags = 0; 4805 int br_idx = 0; 4806 u8 *addr = NULL; 4807 u16 vid = 0; 4808 int err; 4809 4810 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4811 &brport_idx, &addr, &vid, extack); 4812 if (err < 0) 4813 return err; 4814 4815 if (!addr) { 4816 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4817 return -EINVAL; 4818 } 4819 4820 if (brport_idx) { 4821 dev = __dev_get_by_index(net, brport_idx); 4822 if (!dev) { 4823 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4824 return -ENODEV; 4825 } 4826 } 4827 4828 if (br_idx) { 4829 if (dev) { 4830 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4831 return -EINVAL; 4832 } 4833 4834 br_dev = __dev_get_by_index(net, br_idx); 4835 if (!br_dev) { 4836 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4837 return -EINVAL; 4838 } 4839 ops = br_dev->netdev_ops; 4840 } 4841 4842 if (dev) { 4843 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4844 if (!netif_is_bridge_port(dev)) { 4845 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4846 return -EINVAL; 4847 } 4848 br_dev = netdev_master_upper_dev_get(dev); 4849 if (!br_dev) { 4850 NL_SET_ERR_MSG(extack, "Master of device not found"); 4851 return -EINVAL; 4852 } 4853 ops = br_dev->netdev_ops; 4854 } else { 4855 if (!(ndm_flags & NTF_SELF)) { 4856 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4857 return -EINVAL; 4858 } 4859 ops = dev->netdev_ops; 4860 } 4861 } 4862 4863 if (!br_dev && !dev) { 4864 NL_SET_ERR_MSG(extack, "No device specified"); 4865 return -ENODEV; 4866 } 4867 4868 if (!ops || !ops->ndo_fdb_get) { 4869 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4870 return -EOPNOTSUPP; 4871 } 4872 4873 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4874 if (!skb) 4875 return -ENOBUFS; 4876 4877 if (br_dev) 4878 dev = br_dev; 4879 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4880 NETLINK_CB(in_skb).portid, 4881 nlh->nlmsg_seq, extack); 4882 if 
(err) 4883 goto out; 4884 4885 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4886 out: 4887 kfree_skb(skb); 4888 return err; 4889 } 4890 4891 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4892 unsigned int attrnum, unsigned int flag) 4893 { 4894 if (mask & flag) 4895 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4896 return 0; 4897 } 4898 4899 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4900 struct net_device *dev, u16 mode, 4901 u32 flags, u32 mask, int nlflags, 4902 u32 filter_mask, 4903 int (*vlan_fill)(struct sk_buff *skb, 4904 struct net_device *dev, 4905 u32 filter_mask)) 4906 { 4907 struct nlmsghdr *nlh; 4908 struct ifinfomsg *ifm; 4909 struct nlattr *br_afspec; 4910 struct nlattr *protinfo; 4911 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4912 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4913 int err = 0; 4914 4915 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4916 if (nlh == NULL) 4917 return -EMSGSIZE; 4918 4919 ifm = nlmsg_data(nlh); 4920 ifm->ifi_family = AF_BRIDGE; 4921 ifm->__ifi_pad = 0; 4922 ifm->ifi_type = dev->type; 4923 ifm->ifi_index = dev->ifindex; 4924 ifm->ifi_flags = dev_get_flags(dev); 4925 ifm->ifi_change = 0; 4926 4927 4928 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4929 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4930 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4931 (br_dev && 4932 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4933 (dev->addr_len && 4934 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4935 (dev->ifindex != dev_get_iflink(dev) && 4936 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4937 goto nla_put_failure; 4938 4939 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4940 if (!br_afspec) 4941 goto nla_put_failure; 4942 4943 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4944 nla_nest_cancel(skb, br_afspec); 4945 goto nla_put_failure; 4946 } 4947 4948 if (mode != BRIDGE_MODE_UNDEF) { 4949 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4950 nla_nest_cancel(skb, br_afspec); 4951 goto nla_put_failure; 4952 } 4953 } 4954 if (vlan_fill) { 4955 err = vlan_fill(skb, dev, filter_mask); 4956 if (err) { 4957 nla_nest_cancel(skb, br_afspec); 4958 goto nla_put_failure; 4959 } 4960 } 4961 nla_nest_end(skb, br_afspec); 4962 4963 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4964 if (!protinfo) 4965 goto nla_put_failure; 4966 4967 if (brport_nla_put_flag(skb, flags, mask, 4968 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4969 brport_nla_put_flag(skb, flags, mask, 4970 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4971 brport_nla_put_flag(skb, flags, mask, 4972 IFLA_BRPORT_FAST_LEAVE, 4973 BR_MULTICAST_FAST_LEAVE) || 4974 brport_nla_put_flag(skb, flags, mask, 4975 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4976 brport_nla_put_flag(skb, flags, mask, 4977 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4978 brport_nla_put_flag(skb, flags, mask, 4979 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4980 brport_nla_put_flag(skb, flags, mask, 4981 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4982 brport_nla_put_flag(skb, flags, mask, 4983 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4984 brport_nla_put_flag(skb, flags, mask, 4985 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4986 brport_nla_put_flag(skb, flags, mask, 4987 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4988 nla_nest_cancel(skb, protinfo); 4989 goto nla_put_failure; 4990 } 4991 4992 nla_nest_end(skb, protinfo); 4993 4994 nlmsg_end(skb, nlh); 4995 return 0; 4996 
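/* All fill failures funnel through here: nlmsg_cancel() trims the
 * partially built message back out of the skb, and a real error from
 * vlan_fill() takes precedence over the generic -EMSGSIZE.
 */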
nla_put_failure: 4997 nlmsg_cancel(skb, nlh); 4998 return err ? err : -EMSGSIZE; 4999 } 5000 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 5001 5002 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 5003 bool strict_check, u32 *filter_mask, 5004 struct netlink_ext_ack *extack) 5005 { 5006 struct nlattr *tb[IFLA_MAX+1]; 5007 int err, i; 5008 5009 if (strict_check) { 5010 struct ifinfomsg *ifm; 5011 5012 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 5013 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 5014 return -EINVAL; 5015 } 5016 5017 ifm = nlmsg_data(nlh); 5018 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 5019 ifm->ifi_change || ifm->ifi_index) { 5020 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 5021 return -EINVAL; 5022 } 5023 5024 err = nlmsg_parse_deprecated_strict(nlh, 5025 sizeof(struct ifinfomsg), 5026 tb, IFLA_MAX, ifla_policy, 5027 extack); 5028 } else { 5029 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 5030 tb, IFLA_MAX, ifla_policy, 5031 extack); 5032 } 5033 if (err < 0) 5034 return err; 5035 5036 /* new attributes should only be added with strict checking */ 5037 for (i = 0; i <= IFLA_MAX; ++i) { 5038 if (!tb[i]) 5039 continue; 5040 5041 switch (i) { 5042 case IFLA_EXT_MASK: 5043 *filter_mask = nla_get_u32(tb[i]); 5044 break; 5045 default: 5046 if (strict_check) { 5047 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 5048 return -EINVAL; 5049 } 5050 } 5051 } 5052 5053 return 0; 5054 } 5055 5056 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5057 { 5058 const struct nlmsghdr *nlh = cb->nlh; 5059 struct net *net = sock_net(skb->sk); 5060 struct net_device *dev; 5061 int idx = 0; 5062 u32 portid = NETLINK_CB(cb->skb).portid; 5063 u32 seq = nlh->nlmsg_seq; 5064 u32 filter_mask = 0; 5065 int err; 5066 5067 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5068 cb->extack); 5069 if (err < 0 && cb->strict_check) 5070 return err; 5071 5072 rcu_read_lock(); 5073 for_each_netdev_rcu(net, dev) { 5074 const struct net_device_ops *ops = dev->netdev_ops; 5075 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5076 5077 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5078 if (idx >= cb->args[0]) { 5079 err = br_dev->netdev_ops->ndo_bridge_getlink( 5080 skb, portid, seq, dev, 5081 filter_mask, NLM_F_MULTI); 5082 if (err < 0 && err != -EOPNOTSUPP) { 5083 if (likely(skb->len)) 5084 break; 5085 5086 goto out_err; 5087 } 5088 } 5089 idx++; 5090 } 5091 5092 if (ops->ndo_bridge_getlink) { 5093 if (idx >= cb->args[0]) { 5094 err = ops->ndo_bridge_getlink(skb, portid, 5095 seq, dev, 5096 filter_mask, 5097 NLM_F_MULTI); 5098 if (err < 0 && err != -EOPNOTSUPP) { 5099 if (likely(skb->len)) 5100 break; 5101 5102 goto out_err; 5103 } 5104 } 5105 idx++; 5106 } 5107 } 5108 err = skb->len; 5109 out_err: 5110 rcu_read_unlock(); 5111 cb->args[0] = idx; 5112 5113 return err; 5114 } 5115 5116 static inline size_t bridge_nlmsg_size(void) 5117 { 5118 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5119 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5120 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5121 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5122 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5123 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5124 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5125 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5126 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 
5127 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5128 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5129 } 5130 5131 static int rtnl_bridge_notify(struct net_device *dev) 5132 { 5133 struct net *net = dev_net(dev); 5134 struct sk_buff *skb; 5135 int err = -EOPNOTSUPP; 5136 5137 if (!dev->netdev_ops->ndo_bridge_getlink) 5138 return 0; 5139 5140 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5141 if (!skb) { 5142 err = -ENOMEM; 5143 goto errout; 5144 } 5145 5146 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5147 if (err < 0) 5148 goto errout; 5149 5150 /* Notification info is only filled for bridge ports, not the bridge 5151 * device itself. Therefore, a zero notification length is valid and 5152 * should not result in an error. 5153 */ 5154 if (!skb->len) 5155 goto errout; 5156 5157 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5158 return 0; 5159 errout: 5160 WARN_ON(err == -EMSGSIZE); 5161 kfree_skb(skb); 5162 if (err) 5163 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5164 return err; 5165 } 5166 5167 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5168 struct netlink_ext_ack *extack) 5169 { 5170 struct net *net = sock_net(skb->sk); 5171 struct ifinfomsg *ifm; 5172 struct net_device *dev; 5173 struct nlattr *br_spec, *attr, *br_flags_attr = NULL; 5174 int rem, err = -EOPNOTSUPP; 5175 u16 flags = 0; 5176 5177 if (nlmsg_len(nlh) < sizeof(*ifm)) 5178 return -EINVAL; 5179 5180 ifm = nlmsg_data(nlh); 5181 if (ifm->ifi_family != AF_BRIDGE) 5182 return -EPFNOSUPPORT; 5183 5184 dev = __dev_get_by_index(net, ifm->ifi_index); 5185 if (!dev) { 5186 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5187 return -ENODEV; 5188 } 5189 5190 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5191 if (br_spec) { 5192 nla_for_each_nested(attr, br_spec, rem) { 5193 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { 5194 if (nla_len(attr) < sizeof(flags)) 5195 return -EINVAL; 5196 5197 br_flags_attr = attr; 5198 flags = nla_get_u16(attr); 5199 } 5200 5201 if (nla_type(attr) == IFLA_BRIDGE_MODE) { 5202 if (nla_len(attr) < sizeof(u16)) 5203 return -EINVAL; 5204 } 5205 } 5206 } 5207 5208 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5209 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5210 5211 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5212 err = -EOPNOTSUPP; 5213 goto out; 5214 } 5215 5216 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5217 extack); 5218 if (err) 5219 goto out; 5220 5221 flags &= ~BRIDGE_FLAGS_MASTER; 5222 } 5223 5224 if ((flags & BRIDGE_FLAGS_SELF)) { 5225 if (!dev->netdev_ops->ndo_bridge_setlink) 5226 err = -EOPNOTSUPP; 5227 else 5228 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5229 flags, 5230 extack); 5231 if (!err) { 5232 flags &= ~BRIDGE_FLAGS_SELF; 5233 5234 /* Generate event to notify upper layer of bridge 5235 * change 5236 */ 5237 err = rtnl_bridge_notify(dev); 5238 } 5239 } 5240 5241 if (br_flags_attr) 5242 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); 5243 out: 5244 return err; 5245 } 5246 5247 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5248 struct netlink_ext_ack *extack) 5249 { 5250 struct net *net = sock_net(skb->sk); 5251 struct ifinfomsg *ifm; 5252 struct net_device *dev; 5253 struct nlattr *br_spec, *attr = NULL; 5254 int rem, err = -EOPNOTSUPP; 5255 u16 flags = 0; 5256 bool have_flags = false; 5257 5258 if (nlmsg_len(nlh) < sizeof(*ifm)) 5259 return -EINVAL; 5260 5261 ifm = 
nlmsg_data(nlh); 5262 if (ifm->ifi_family != AF_BRIDGE) 5263 return -EPFNOSUPPORT; 5264 5265 dev = __dev_get_by_index(net, ifm->ifi_index); 5266 if (!dev) { 5267 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5268 return -ENODEV; 5269 } 5270 5271 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5272 if (br_spec) { 5273 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec, 5274 rem) { 5275 if (nla_len(attr) < sizeof(flags)) 5276 return -EINVAL; 5277 5278 have_flags = true; 5279 flags = nla_get_u16(attr); 5280 break; 5281 } 5282 } 5283 5284 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5285 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5286 5287 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5288 err = -EOPNOTSUPP; 5289 goto out; 5290 } 5291 5292 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5293 if (err) 5294 goto out; 5295 5296 flags &= ~BRIDGE_FLAGS_MASTER; 5297 } 5298 5299 if ((flags & BRIDGE_FLAGS_SELF)) { 5300 if (!dev->netdev_ops->ndo_bridge_dellink) 5301 err = -EOPNOTSUPP; 5302 else 5303 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5304 flags); 5305 5306 if (!err) { 5307 flags &= ~BRIDGE_FLAGS_SELF; 5308 5309 /* Generate event to notify upper layer of bridge 5310 * change 5311 */ 5312 err = rtnl_bridge_notify(dev); 5313 } 5314 } 5315 5316 if (have_flags) 5317 memcpy(nla_data(attr), &flags, sizeof(flags)); 5318 out: 5319 return err; 5320 } 5321 5322 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5323 { 5324 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5325 (!idxattr || idxattr == attrid); 5326 } 5327 5328 static bool 5329 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5330 { 5331 return dev->netdev_ops && 5332 dev->netdev_ops->ndo_has_offload_stats && 5333 dev->netdev_ops->ndo_get_offload_stats && 5334 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5335 } 5336 5337 static unsigned int 5338 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5339 { 5340 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5341 sizeof(struct rtnl_link_stats64) : 0; 5342 } 5343 5344 static int 5345 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5346 struct sk_buff *skb) 5347 { 5348 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5349 struct nlattr *attr = NULL; 5350 void *attr_data; 5351 int err; 5352 5353 if (!size) 5354 return -ENODATA; 5355 5356 attr = nla_reserve_64bit(skb, attr_id, size, 5357 IFLA_OFFLOAD_XSTATS_UNSPEC); 5358 if (!attr) 5359 return -EMSGSIZE; 5360 5361 attr_data = nla_data(attr); 5362 memset(attr_data, 0, size); 5363 5364 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5365 if (err) 5366 return err; 5367 5368 return 0; 5369 } 5370 5371 static unsigned int 5372 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5373 enum netdev_offload_xstats_type type) 5374 { 5375 bool enabled = netdev_offload_xstats_enabled(dev, type); 5376 5377 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5378 } 5379 5380 struct rtnl_offload_xstats_request_used { 5381 bool request; 5382 bool used; 5383 }; 5384 5385 static int 5386 rtnl_offload_xstats_get_stats(struct net_device *dev, 5387 enum netdev_offload_xstats_type type, 5388 struct rtnl_offload_xstats_request_used *ru, 5389 struct rtnl_hw_stats64 *stats, 5390 struct netlink_ext_ack *extack) 5391 { 5392 bool request; 5393 bool used; 5394 int err; 5395 5396 request = netdev_offload_xstats_enabled(dev, type); 5397 if (!request) { 5398 used = false; 5399 goto out; 5400 } 5401 5402 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5403 if (err) 5404 return err; 5405 5406 out: 5407 if (ru) { 5408 ru->request = request; 5409 ru->used = used; 5410 } 5411 return 0; 5412 } 5413 5414 static int 5415 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5416 struct rtnl_offload_xstats_request_used *ru) 5417 { 5418 struct nlattr *nest; 5419 5420 nest = nla_nest_start(skb, attr_id); 5421 if (!nest) 5422 return -EMSGSIZE; 5423 5424 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5425 goto nla_put_failure; 5426 5427 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5428 goto nla_put_failure; 5429 5430 nla_nest_end(skb, nest); 5431 return 0; 5432 5433 nla_put_failure: 5434 nla_nest_cancel(skb, nest); 5435 return -EMSGSIZE; 5436 } 5437 5438 static int 5439 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5440 struct netlink_ext_ack *extack) 5441 { 5442 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5443 struct rtnl_offload_xstats_request_used ru_l3; 5444 struct nlattr *nest; 5445 int err; 5446 5447 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5448 if (err) 5449 return err; 5450 5451 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5452 if (!nest) 5453 return -EMSGSIZE; 5454 5455 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5456 IFLA_OFFLOAD_XSTATS_L3_STATS, 5457 &ru_l3)) 5458 goto nla_put_failure; 5459 5460 nla_nest_end(skb, nest); 5461 return 0; 5462 5463 nla_put_failure: 5464 nla_nest_cancel(skb, nest); 5465 return -EMSGSIZE; 5466 } 5467 5468 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5469 int *prividx, u32 off_filter_mask, 5470 struct netlink_ext_ack *extack) 5471 { 5472 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5473 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5474 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5475 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5476 bool have_data = false; 5477 int err; 5478 5479 if (*prividx <= attr_id_cpu_hit && 5480 (off_filter_mask & 5481 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5482 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5483 if (!err) { 5484 have_data = true; 5485 } else if (err != -ENODATA) { 5486 *prividx = attr_id_cpu_hit; 5487 return err; 5488 } 5489 } 5490 5491 if (*prividx <= attr_id_hw_s_info && 5492 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5493 *prividx = attr_id_hw_s_info; 5494 5495 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5496 if (err) 5497 return err; 5498 5499 have_data = true; 5500 *prividx = 0; 5501 } 5502 5503 if (*prividx <= attr_id_l3_stats && 5504 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5505 unsigned int size_l3; 5506 struct nlattr *attr; 5507 5508 *prividx = attr_id_l3_stats; 5509 5510 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5511 if (!size_l3) 5512 goto skip_l3_stats; 5513 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5514 IFLA_OFFLOAD_XSTATS_UNSPEC); 5515 if (!attr) 5516 return -EMSGSIZE; 5517 5518 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5519 nla_data(attr), extack); 5520 if (err) 5521 return err; 5522 5523 have_data = true; 5524 skip_l3_stats: 5525 *prividx = 0; 5526 } 5527 5528 if (!have_data) 5529 return -ENODATA; 5530 5531 *prividx = 0; 5532 return 0; 5533 } 5534 5535 static unsigned int 5536 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5537 enum netdev_offload_xstats_type type) 5538 { 5539 return nla_total_size(0) + 5540 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5541 nla_total_size(sizeof(u8)) + 5542 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5543 nla_total_size(sizeof(u8)) + 5544 0; 5545 } 5546 5547 static unsigned int 5548 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5549 { 5550 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5551 5552 return nla_total_size(0) + 5553 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5554 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5555 0; 5556 } 5557 5558 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5559 u32 off_filter_mask) 5560 { 5561 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5562 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5563 int nla_size = 0; 5564 int size; 5565 5566 if (off_filter_mask & 5567 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5568 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5569 nla_size += nla_total_size_64bit(size); 5570 } 5571 5572 if (off_filter_mask & 5573 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5574 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5575 5576 if (off_filter_mask & 5577 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5578 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5579 nla_size += nla_total_size_64bit(size); 5580 } 5581 5582 if (nla_size != 0) 5583 nla_size += nla_total_size(0); 5584 5585 return nla_size; 5586 } 5587 5588 struct rtnl_stats_dump_filters { 5589 /* mask[0] filters outer attributes. Then individual nests have their 5590 * filtering mask at the index of the nested attribute. 
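*
* As a sketch, a caller interested only in hardware-offloaded L3 stats
* would set:
*
*   filters.mask[0] =
*       IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
*   filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
*       IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS);
*
* rtnl_offload_xstats_notify() below builds its response mask the same
* way (with IFLA_OFFLOAD_XSTATS_HW_S_INFO).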
5591 */ 5592 u32 mask[IFLA_STATS_MAX + 1]; 5593 }; 5594 5595 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5596 int type, u32 pid, u32 seq, u32 change, 5597 unsigned int flags, 5598 const struct rtnl_stats_dump_filters *filters, 5599 int *idxattr, int *prividx, 5600 struct netlink_ext_ack *extack) 5601 { 5602 unsigned int filter_mask = filters->mask[0]; 5603 struct if_stats_msg *ifsm; 5604 struct nlmsghdr *nlh; 5605 struct nlattr *attr; 5606 int s_prividx = *prividx; 5607 int err; 5608 5609 ASSERT_RTNL(); 5610 5611 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5612 if (!nlh) 5613 return -EMSGSIZE; 5614 5615 ifsm = nlmsg_data(nlh); 5616 ifsm->family = PF_UNSPEC; 5617 ifsm->pad1 = 0; 5618 ifsm->pad2 = 0; 5619 ifsm->ifindex = dev->ifindex; 5620 ifsm->filter_mask = filter_mask; 5621 5622 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5623 struct rtnl_link_stats64 *sp; 5624 5625 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5626 sizeof(struct rtnl_link_stats64), 5627 IFLA_STATS_UNSPEC); 5628 if (!attr) { 5629 err = -EMSGSIZE; 5630 goto nla_put_failure; 5631 } 5632 5633 sp = nla_data(attr); 5634 dev_get_stats(dev, sp); 5635 } 5636 5637 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5638 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5639 5640 if (ops && ops->fill_linkxstats) { 5641 *idxattr = IFLA_STATS_LINK_XSTATS; 5642 attr = nla_nest_start_noflag(skb, 5643 IFLA_STATS_LINK_XSTATS); 5644 if (!attr) { 5645 err = -EMSGSIZE; 5646 goto nla_put_failure; 5647 } 5648 5649 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5650 nla_nest_end(skb, attr); 5651 if (err) 5652 goto nla_put_failure; 5653 *idxattr = 0; 5654 } 5655 } 5656 5657 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5658 *idxattr)) { 5659 const struct rtnl_link_ops *ops = NULL; 5660 const struct net_device *master; 5661 5662 master = netdev_master_upper_dev_get(dev); 5663 if (master) 5664 ops = master->rtnl_link_ops; 5665 if (ops && ops->fill_linkxstats) { 5666 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5667 attr = nla_nest_start_noflag(skb, 5668 IFLA_STATS_LINK_XSTATS_SLAVE); 5669 if (!attr) { 5670 err = -EMSGSIZE; 5671 goto nla_put_failure; 5672 } 5673 5674 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5675 nla_nest_end(skb, attr); 5676 if (err) 5677 goto nla_put_failure; 5678 *idxattr = 0; 5679 } 5680 } 5681 5682 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5683 *idxattr)) { 5684 u32 off_filter_mask; 5685 5686 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5687 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5688 attr = nla_nest_start_noflag(skb, 5689 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5690 if (!attr) { 5691 err = -EMSGSIZE; 5692 goto nla_put_failure; 5693 } 5694 5695 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5696 off_filter_mask, extack); 5697 if (err == -ENODATA) 5698 nla_nest_cancel(skb, attr); 5699 else 5700 nla_nest_end(skb, attr); 5701 5702 if (err && err != -ENODATA) 5703 goto nla_put_failure; 5704 *idxattr = 0; 5705 } 5706 5707 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5708 struct rtnl_af_ops *af_ops; 5709 5710 *idxattr = IFLA_STATS_AF_SPEC; 5711 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5712 if (!attr) { 5713 err = -EMSGSIZE; 5714 goto nla_put_failure; 5715 } 5716 5717 rcu_read_lock(); 5718 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5719 if (af_ops->fill_stats_af) { 5720 struct nlattr *af; 5721 5722 af = 
nla_nest_start_noflag(skb,
5723 af_ops->family);
5724 if (!af) {
5725 rcu_read_unlock();
5726 err = -EMSGSIZE;
5727 goto nla_put_failure;
5728 }
5729 err = af_ops->fill_stats_af(skb, dev);
5730
5731 if (err == -ENODATA) {
5732 nla_nest_cancel(skb, af);
5733 } else if (err < 0) {
5734 rcu_read_unlock();
5735 goto nla_put_failure;
5736 }
5737
5738 nla_nest_end(skb, af);
5739 }
5740 }
5741 rcu_read_unlock();
5742
5743 nla_nest_end(skb, attr);
5744
5745 *idxattr = 0;
5746 }
5747
5748 nlmsg_end(skb, nlh);
5749
5750 return 0;
5751
5752 nla_put_failure:
5753 /* not a multi message or no progress means a real error */
5754 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5755 nlmsg_cancel(skb, nlh);
5756 else
5757 nlmsg_end(skb, nlh);
5758
5759 return err;
5760 }
5761
5762 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5763 const struct rtnl_stats_dump_filters *filters)
5764 {
5765 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5766 unsigned int filter_mask = filters->mask[0];
5767
5768 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5769 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5770
5771 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5772 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5773 int attr = IFLA_STATS_LINK_XSTATS;
5774
5775 if (ops && ops->get_linkxstats_size) {
5776 size += nla_total_size(ops->get_linkxstats_size(dev,
5777 attr));
5778 /* for IFLA_STATS_LINK_XSTATS */
5779 size += nla_total_size(0);
5780 }
5781 }
5782
5783 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5784 struct net_device *_dev = (struct net_device *)dev;
5785 const struct rtnl_link_ops *ops = NULL;
5786 const struct net_device *master;
5787
5788 /* netdev_master_upper_dev_get can't take const */
5789 master = netdev_master_upper_dev_get(_dev);
5790 if (master)
5791 ops = master->rtnl_link_ops;
5792 if (ops && ops->get_linkxstats_size) {
5793 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5794
5795 size += nla_total_size(ops->get_linkxstats_size(dev,
5796 attr));
5797 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5798 size += nla_total_size(0);
5799 }
5800 }
5801
5802 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5803 u32 off_filter_mask;
5804
5805 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5806 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5807 }
5808
5809 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5810 struct rtnl_af_ops *af_ops;
5811
5812 /* for IFLA_STATS_AF_SPEC */
5813 size += nla_total_size(0);
5814
5815 rcu_read_lock();
5816 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5817 if (af_ops->get_stats_af_size) {
5818 size += nla_total_size(
5819 af_ops->get_stats_af_size(dev));
5820
5821 /* for AF_* */
5822 size += nla_total_size(0);
5823 }
5824 }
5825 rcu_read_unlock();
5826 }
5827
5828 return size;
5829 }
5830
5831 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5832
5833 static const struct nla_policy
5834 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5835 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5836 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5837 };
5838
5839 static const struct nla_policy
5840 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5841 [IFLA_STATS_GET_FILTERS] =
5842 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5843 };
5844
5845 static const struct nla_policy
5846 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5847
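/* The single settable attribute is an enable/disable toggle, hence the
 * 0..1 policy range below.
 */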
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5848 }; 5849 5850 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5851 struct rtnl_stats_dump_filters *filters, 5852 struct netlink_ext_ack *extack) 5853 { 5854 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5855 int err; 5856 int at; 5857 5858 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5859 rtnl_stats_get_policy_filters, extack); 5860 if (err < 0) 5861 return err; 5862 5863 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5864 if (tb[at]) { 5865 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5866 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5867 return -EINVAL; 5868 } 5869 filters->mask[at] = nla_get_u32(tb[at]); 5870 } 5871 } 5872 5873 return 0; 5874 } 5875 5876 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5877 u32 filter_mask, 5878 struct rtnl_stats_dump_filters *filters, 5879 struct netlink_ext_ack *extack) 5880 { 5881 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5882 int err; 5883 int i; 5884 5885 filters->mask[0] = filter_mask; 5886 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5887 filters->mask[i] = -1U; 5888 5889 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5890 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5891 if (err < 0) 5892 return err; 5893 5894 if (tb[IFLA_STATS_GET_FILTERS]) { 5895 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5896 filters, extack); 5897 if (err) 5898 return err; 5899 } 5900 5901 return 0; 5902 } 5903 5904 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5905 bool is_dump, struct netlink_ext_ack *extack) 5906 { 5907 struct if_stats_msg *ifsm; 5908 5909 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5910 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5911 return -EINVAL; 5912 } 5913 5914 if (!strict_check) 5915 return 0; 5916 5917 ifsm = nlmsg_data(nlh); 5918 5919 /* only requests using strict checks can pass data to influence 5920 * the dump. The legacy exception is filter_mask. 
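* A non-zero ifindex is likewise rejected for dumps, though it is
* required for doit requests; that is what the is_dump argument gates
* below.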
5921 */ 5922 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5923 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5924 return -EINVAL; 5925 } 5926 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5927 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5928 return -EINVAL; 5929 } 5930 5931 return 0; 5932 } 5933 5934 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5935 struct netlink_ext_ack *extack) 5936 { 5937 struct rtnl_stats_dump_filters filters; 5938 struct net *net = sock_net(skb->sk); 5939 struct net_device *dev = NULL; 5940 int idxattr = 0, prividx = 0; 5941 struct if_stats_msg *ifsm; 5942 struct sk_buff *nskb; 5943 int err; 5944 5945 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5946 false, extack); 5947 if (err) 5948 return err; 5949 5950 ifsm = nlmsg_data(nlh); 5951 if (ifsm->ifindex > 0) 5952 dev = __dev_get_by_index(net, ifsm->ifindex); 5953 else 5954 return -EINVAL; 5955 5956 if (!dev) 5957 return -ENODEV; 5958 5959 if (!ifsm->filter_mask) { 5960 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5961 return -EINVAL; 5962 } 5963 5964 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5965 if (err) 5966 return err; 5967 5968 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5969 if (!nskb) 5970 return -ENOBUFS; 5971 5972 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5973 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5974 0, &filters, &idxattr, &prividx, extack); 5975 if (err < 0) { 5976 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5977 WARN_ON(err == -EMSGSIZE); 5978 kfree_skb(nskb); 5979 } else { 5980 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5981 } 5982 5983 return err; 5984 } 5985 5986 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5987 { 5988 struct netlink_ext_ack *extack = cb->extack; 5989 struct rtnl_stats_dump_filters filters; 5990 struct net *net = sock_net(skb->sk); 5991 unsigned int flags = NLM_F_MULTI; 5992 struct if_stats_msg *ifsm; 5993 struct { 5994 unsigned long ifindex; 5995 int idxattr; 5996 int prividx; 5997 } *ctx = (void *)cb->ctx; 5998 struct net_device *dev; 5999 int err; 6000 6001 cb->seq = net->dev_base_seq; 6002 6003 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 6004 if (err) 6005 return err; 6006 6007 ifsm = nlmsg_data(cb->nlh); 6008 if (!ifsm->filter_mask) { 6009 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 6010 return -EINVAL; 6011 } 6012 6013 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 6014 extack); 6015 if (err) 6016 return err; 6017 6018 for_each_netdev_dump(net, dev, ctx->ifindex) { 6019 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 6020 NETLINK_CB(cb->skb).portid, 6021 cb->nlh->nlmsg_seq, 0, 6022 flags, &filters, 6023 &ctx->idxattr, &ctx->prividx, 6024 extack); 6025 /* If we ran out of room on the first message, 6026 * we're in trouble. 
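* (-EMSGSIZE with an empty skb means a single device's stats cannot
* fit in one message, which no amount of dump continuation can fix;
* hence the WARN_ON below.)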
6027 */ 6028 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 6029 6030 if (err < 0) 6031 break; 6032 ctx->prividx = 0; 6033 ctx->idxattr = 0; 6034 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 6035 } 6036 6037 return err; 6038 } 6039 6040 void rtnl_offload_xstats_notify(struct net_device *dev) 6041 { 6042 struct rtnl_stats_dump_filters response_filters = {}; 6043 struct net *net = dev_net(dev); 6044 int idxattr = 0, prividx = 0; 6045 struct sk_buff *skb; 6046 int err = -ENOBUFS; 6047 6048 ASSERT_RTNL(); 6049 6050 response_filters.mask[0] |= 6051 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6052 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6053 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6054 6055 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6056 GFP_KERNEL); 6057 if (!skb) 6058 goto errout; 6059 6060 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6061 &response_filters, &idxattr, &prividx, NULL); 6062 if (err < 0) { 6063 kfree_skb(skb); 6064 goto errout; 6065 } 6066 6067 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6068 return; 6069 6070 errout: 6071 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6072 } 6073 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6074 6075 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6076 struct netlink_ext_ack *extack) 6077 { 6078 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6079 struct rtnl_stats_dump_filters response_filters = {}; 6080 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6081 struct net *net = sock_net(skb->sk); 6082 struct net_device *dev = NULL; 6083 struct if_stats_msg *ifsm; 6084 bool notify = false; 6085 int err; 6086 6087 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6088 false, extack); 6089 if (err) 6090 return err; 6091 6092 ifsm = nlmsg_data(nlh); 6093 if (ifsm->family != AF_UNSPEC) { 6094 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6095 return -EINVAL; 6096 } 6097 6098 if (ifsm->ifindex > 0) 6099 dev = __dev_get_by_index(net, ifsm->ifindex); 6100 else 6101 return -EINVAL; 6102 6103 if (!dev) 6104 return -ENODEV; 6105 6106 if (ifsm->filter_mask) { 6107 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6108 return -EINVAL; 6109 } 6110 6111 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6112 ifla_stats_set_policy, extack); 6113 if (err < 0) 6114 return err; 6115 6116 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6117 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6118 6119 if (req) 6120 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6121 else 6122 err = netdev_offload_xstats_disable(dev, t_l3); 6123 6124 if (!err) 6125 notify = true; 6126 else if (err != -EALREADY) 6127 return err; 6128 6129 response_filters.mask[0] |= 6130 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6131 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6132 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6133 } 6134 6135 if (notify) 6136 rtnl_offload_xstats_notify(dev); 6137 6138 return 0; 6139 } 6140 6141 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6142 struct netlink_ext_ack *extack) 6143 { 6144 struct br_port_msg *bpm; 6145 6146 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6147 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6148 return -EINVAL; 6149 } 6150 6151 bpm = nlmsg_data(nlh); 6152 if (bpm->ifindex) { 6153 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump 
request"); 6154 return -EINVAL; 6155 } 6156 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6157 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request"); 6158 return -EINVAL; 6159 } 6160 6161 return 0; 6162 } 6163 6164 struct rtnl_mdb_dump_ctx { 6165 long idx; 6166 }; 6167 6168 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6169 { 6170 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6171 struct net *net = sock_net(skb->sk); 6172 struct net_device *dev; 6173 int idx, s_idx; 6174 int err; 6175 6176 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6177 6178 if (cb->strict_check) { 6179 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6180 if (err) 6181 return err; 6182 } 6183 6184 s_idx = ctx->idx; 6185 idx = 0; 6186 6187 for_each_netdev(net, dev) { 6188 if (idx < s_idx) 6189 goto skip; 6190 if (!dev->netdev_ops->ndo_mdb_dump) 6191 goto skip; 6192 6193 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6194 if (err == -EMSGSIZE) 6195 goto out; 6196 /* Moving on to next device, reset markers and sequence 6197 * counters since they are all maintained per-device. 6198 */ 6199 memset(cb->ctx, 0, sizeof(cb->ctx)); 6200 cb->prev_seq = 0; 6201 cb->seq = 0; 6202 skip: 6203 idx++; 6204 } 6205 6206 out: 6207 ctx->idx = idx; 6208 return skb->len; 6209 } 6210 6211 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr, 6212 struct netlink_ext_ack *extack) 6213 { 6214 struct br_mdb_entry *entry = nla_data(attr); 6215 6216 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6217 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6218 return -EINVAL; 6219 } 6220 6221 if (entry->ifindex) { 6222 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified"); 6223 return -EINVAL; 6224 } 6225 6226 if (entry->state) { 6227 NL_SET_ERR_MSG(extack, "Entry state cannot be specified"); 6228 return -EINVAL; 6229 } 6230 6231 if (entry->flags) { 6232 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified"); 6233 return -EINVAL; 6234 } 6235 6236 if (entry->vid >= VLAN_VID_MASK) { 6237 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6238 return -EINVAL; 6239 } 6240 6241 if (entry->addr.proto != htons(ETH_P_IP) && 6242 entry->addr.proto != htons(ETH_P_IPV6) && 6243 entry->addr.proto != 0) { 6244 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6245 return -EINVAL; 6246 } 6247 6248 return 0; 6249 } 6250 6251 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = { 6252 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6253 rtnl_validate_mdb_entry_get, 6254 sizeof(struct br_mdb_entry)), 6255 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6256 }; 6257 6258 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 6259 struct netlink_ext_ack *extack) 6260 { 6261 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1]; 6262 struct net *net = sock_net(in_skb->sk); 6263 struct br_port_msg *bpm; 6264 struct net_device *dev; 6265 int err; 6266 6267 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb, 6268 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack); 6269 if (err) 6270 return err; 6271 6272 bpm = nlmsg_data(nlh); 6273 if (!bpm->ifindex) { 6274 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6275 return -EINVAL; 6276 } 6277 6278 dev = __dev_get_by_index(net, bpm->ifindex); 6279 if (!dev) { 6280 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6281 return -ENODEV; 6282 } 6283 6284 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) { 6285 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute"); 6286 return -EINVAL; 6287 } 6288 6289 if 
static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
	struct net *net = sock_net(in_skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_get) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, extack);
}

static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}
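
/* Validation for RTM_DELMDB with NLM_F_BULK set: the entry describes a
 * filter rather than a single group, so the group address must be left
 * zeroed and no entry flags may be set.
 */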
static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
					    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);
	struct br_mdb_entry zero_entry = {};

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_N_VID - 1) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}

/* Process one rtnetlink message. */
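
/* Wrapper installed by rtnetlink_dump_start() around the registered
 * dumpit handler (stashed in cb->data). It takes the RTNL lock unless
 * the handler was registered with RTNL_FLAG_DUMP_UNLOCKED, and emulates
 * the historical "NLM_DONE in a separate datagram" behaviour when
 * RTNL_FLAG_DUMP_SPLIT_NLM_DONE is set.
 */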
static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
	rtnl_dumpit_func dumpit = cb->data;
	int err;

	/* The previous iteration has already finished; avoid calling
	 * ->dumpit() again, as it may not expect to be called after it
	 * reached the end.
	 */
	if (!dumpit)
		return 0;

	if (needs_lock)
		rtnl_lock();
	err = dumpit(skb, cb);
	if (needs_lock)
		rtnl_unlock();

	/* Old dump handlers used to send NLM_DONE in a separate recvmsg()
	 * response. Some applications which parse netlink manually depend
	 * on this.
	 */
	if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
		if (err < 0 && err != -EMSGSIZE)
			return err;
		if (!err)
			cb->data = NULL;

		return skb->len;
	}
	return err;
}

static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				struct netlink_dump_control *control)
{
	if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
	    !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
		WARN_ON(control->data);
		control->data = control->dump;
		control->dump = rtnl_dumpit;
	}

	return netlink_dump_start(ssk, skb, nlh, control);
}
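
/* Dispatch a single rtnetlink request: look up the (family, msgtype)
 * handler registered in rtnl_msg_handlers, falling back to PF_UNSPEC.
 * GET requests with NLM_F_DUMP start a netlink dump; all other kinds
 * require CAP_NET_ADMIN and run the doit handler, under the RTNL mutex
 * unless the handler was registered with RTNL_FLAG_DOIT_UNLOCKED.
 */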
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;
		flags = link->flags;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
				.flags = flags,
			};
			err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on the
			 * module if the dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups = RTNLGRP_MAX,
		.input = rtnetlink_rcv,
		.flags = NL_CFG_F_NONROOT_RECV,
		.bind = rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
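
/* Register the core, protocol-independent rtnetlink handlers. Protocol
 * and subsystem code adds its own handlers the same way, e.g. (purely
 * illustrative, see the respective subsystems for the actual calls):
 *
 *	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
 */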
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}