// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
        rtnl_doit_func		doit;
        rtnl_dumpit_func	dumpit;
        struct module		*owner;
        unsigned int		flags;
        struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
        mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
        return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
        if (head && tail) {
                tail->next = defer_kfree_skb_list;
                defer_kfree_skb_list = head;
        }
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
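/*
 * Illustrative usage sketch (not part of this file): a caller that already
 * holds the RTNL can queue skbs for deferred freeing; the actual
 * kfree_skb() calls then happen in __rtnl_unlock() after the mutex is
 * dropped:
 *
 *	rtnl_lock();
 *	...
 *	rtnl_kfree_skbs(skb, skb);	// defer freeing this skb
 *	rtnl_unlock();			// kfree_skb() runs after unlock
 *
 * defer_kfree_skb_list itself needs no extra locking because it is only
 * touched while the RTNL is held.
 */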
void __rtnl_unlock(void)
{
        struct sk_buff *head = defer_kfree_skb_list;

        defer_kfree_skb_list = NULL;

        /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
         * is used. In some places, e.g. in cfg80211, we have code that will do
         * something like
         *   rtnl_lock()
         *   wiphy_lock()
         *   ...
         *   rtnl_unlock()
         *
         * and because netdev_run_todo() acquires the RTNL for items on the list
         * we could cause a situation such as this:
         * Thread 1			Thread 2
         *				  rtnl_lock()
         *				  unregister_netdevice()
         *				  __rtnl_unlock()
         * rtnl_lock()
         * wiphy_lock()
         * rtnl_unlock()
         *   netdev_run_todo()
         *     __rtnl_unlock()
         *
         *     // list not empty now
         *     // because of thread 2
         *				  rtnl_lock()
         *     while (!list_empty(...))
         *       rtnl_lock()
         *				  wiphy_lock()
         * **** DEADLOCK ****
         *
         * However, usage of __rtnl_unlock() is rare, and so we can ensure that
         * it's not used in cases where something is added to the todo list.
         */
        WARN_ON(!list_empty(&net_todo_list));

        mutex_unlock(&rtnl_mutex);

        while (head) {
                struct sk_buff *next = head->next;

                kfree_skb(head);
                cond_resched();
                head = next;
        }
}

void rtnl_unlock(void)
{
        /* This fellow will unlock it for us. */
        netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
        return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
        return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
        return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
        return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
        int msgindex = msgtype - RTM_BASE;

        /*
         * msgindex < 0 implies someone tried to register a netlink
         * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
         * the message type has not been added to linux/rtnetlink.h
         */
        BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

        return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
        struct rtnl_link __rcu **tab;

        if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
                protocol = PF_UNSPEC;

        tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
        if (!tab)
                tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

        return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
                                  int protocol, int msgtype,
                                  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                                  unsigned int flags)
{
        struct rtnl_link *link, *old;
        struct rtnl_link __rcu **tab;
        int msgindex;
        int ret = -ENOBUFS;

        BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
        msgindex = rtm_msgindex(msgtype);

        rtnl_lock();
        tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
        if (tab == NULL) {
                tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
                if (!tab)
                        goto unlock;

                /* ensures we see the 0 stores */
                rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
        }

        old = rtnl_dereference(tab[msgindex]);
        if (old) {
                link = kmemdup(old, sizeof(*old), GFP_KERNEL);
                if (!link)
                        goto unlock;
        } else {
                link = kzalloc(sizeof(*link), GFP_KERNEL);
                if (!link)
                        goto unlock;
        }

        WARN_ON(link->owner && link->owner != owner);
        link->owner = owner;

        WARN_ON(doit && link->doit && link->doit != doit);
        if (doit)
                link->doit = doit;
        WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
        if (dumpit)
                link->dumpit = dumpit;

        WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
                (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
        link->flags |= flags;

        /* publish protocol:msgtype */
        rcu_assign_pointer(tab[msgindex], link);
        ret = 0;
        if (old)
                kfree_rcu(old, rcu);
unlock:
        rtnl_unlock();
        return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register(), but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
                         int protocol, int msgtype,
                         rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                         unsigned int flags)
{
        return rtnl_register_internal(owner, protocol, msgtype,
                                      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
                   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                   unsigned int flags)
{
        int err;

        err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
                                     flags);
        if (err)
                pr_err("Unable to register rtnetlink message handler, "
                       "protocol = %d, message type = %d\n", protocol, msgtype);
}
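/*
 * Illustrative sketch (not from this file; the family/msgtype pair and
 * the foo_* handler names are hypothetical): a protocol module would
 * typically wire up its handlers at init time,
 *
 *	rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETFOO,
 *			     foo_get_doit, foo_get_dumpit, 0);
 *
 * and tear them down with rtnl_unregister() or rtnl_unregister_all()
 * on module exit.
 */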
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
        struct rtnl_link __rcu **tab;
        struct rtnl_link *link;
        int msgindex;

        BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
        msgindex = rtm_msgindex(msgtype);

        rtnl_lock();
        tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
        if (!tab) {
                rtnl_unlock();
                return -ENOENT;
        }

        link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
        rtnl_unlock();

        kfree_rcu(link, rcu);

        return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
        struct rtnl_link __rcu **tab;
        struct rtnl_link *link;
        int msgindex;

        BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

        rtnl_lock();
        tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
        if (!tab) {
                rtnl_unlock();
                return;
        }
        for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
                kfree_rcu(link, rcu);
        }
        rtnl_unlock();

        synchronize_net();

        kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
        const struct rtnl_link_ops *ops;

        list_for_each_entry(ops, &link_ops, list) {
                if (!strcmp(ops->kind, kind))
                        return ops;
        }
        return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
        if (rtnl_link_ops_get(ops->kind))
                return -EEXIST;

        /* The check for alloc/setup is here because if ops
         * does not have that filled up, it is not possible
         * to use the ops to create a device. So do not
         * fill up dellink as well. That disables rtnl_dellink.
         */
        if ((ops->alloc || ops->setup) && !ops->dellink)
                ops->dellink = unregister_netdevice_queue;

        list_add_tail(&ops->list, &link_ops);
        return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
        int err;

        /* Sanity-check max sizes to avoid stack buffer overflow. */
        if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
                    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
                return -EINVAL;

        rtnl_lock();
        err = __rtnl_link_register(ops);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
        struct net_device *dev;
        LIST_HEAD(list_kill);

        for_each_netdev(net, dev) {
                if (dev->rtnl_link_ops == ops)
                        ops->dellink(dev, &list_kill);
        }
        unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
        struct net *net;

        for_each_net(net) {
                __rtnl_kill_links(net, ops);
        }
        list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
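/*
 * Illustrative sketch (not from this file; "foo" and foo_setup() are
 * hypothetical): a virtual-device driver registers its link type roughly
 * like this:
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 *
 * Because .setup is set and .dellink is not, __rtnl_link_register()
 * defaults .dellink to unregister_netdevice_queue, so "ip link del"
 * works on such devices.
 */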
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&netdev_unregistering_wq, &wait);
        for (;;) {
                rtnl_lock();
                /* We held write locked pernet_ops_rwsem, and parallel
                 * setup_net() and cleanup_net() are not possible.
                 */
                if (!atomic_read(&dev_unreg_count))
                        break;
                __rtnl_unlock();

                wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
        /* Close the race with setup_net() and cleanup_net() */
        down_write(&pernet_ops_rwsem);
        rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
        up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
        struct net_device *master_dev;
        const struct rtnl_link_ops *ops;
        size_t size = 0;

        rcu_read_lock();

        master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
        if (!master_dev)
                goto out;

        ops = master_dev->rtnl_link_ops;
        if (!ops || !ops->get_slave_size)
                goto out;
        /* IFLA_INFO_SLAVE_DATA + nested data */
        size = nla_total_size(sizeof(struct nlattr)) +
               ops->get_slave_size(master_dev, dev);

out:
        rcu_read_unlock();
        return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
        size_t size;

        if (!ops)
                return 0;

        size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
               nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

        if (ops->get_size)
                /* IFLA_INFO_DATA + nested data */
                size += nla_total_size(sizeof(struct nlattr)) +
                        ops->get_size(dev);

        if (ops->get_xstats_size)
                /* IFLA_INFO_XSTATS */
                size += nla_total_size(ops->get_xstats_size(dev));

        size += rtnl_link_get_slave_info_data_size(dev);

        return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
        const struct rtnl_af_ops *ops;

        ASSERT_RTNL();

        list_for_each_entry(ops, &rtnl_af_ops, list) {
                if (ops->family == family)
                        return ops;
        }

        return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
        rtnl_lock();
        list_add_tail_rcu(&ops->list, &rtnl_af_ops);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
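/*
 * Illustrative sketch (not from this file; the inet_* callbacks stand in
 * for a family's real implementations): an address family hooks into
 * IFLA_AF_SPEC handling by registering a struct rtnl_af_ops:
 *
 *	static struct rtnl_af_ops inet_af_ops __read_mostly = {
 *		.family           = AF_INET,
 *		.fill_link_af     = inet_fill_link_af,
 *		.get_link_af_size = inet_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&inet_af_ops);
 */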
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
        rtnl_lock();
        list_del_rcu(&ops->list);
        rtnl_unlock();

        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
                                    u32 ext_filter_mask)
{
        struct rtnl_af_ops *af_ops;
        size_t size;

        /* IFLA_AF_SPEC */
        size = nla_total_size(sizeof(struct nlattr));

        rcu_read_lock();
        list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
                if (af_ops->get_link_af_size) {
                        /* AF_* + nested data */
                        size += nla_total_size(sizeof(struct nlattr)) +
                                af_ops->get_link_af_size(dev, ext_filter_mask);
                }
        }
        rcu_read_unlock();

        return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
        struct net_device *master_dev;
        bool ret = false;

        rcu_read_lock();

        master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
        if (master_dev && master_dev->rtnl_link_ops)
                ret = true;
        rcu_read_unlock();
        return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
                                     const struct net_device *dev)
{
        struct net_device *master_dev;
        const struct rtnl_link_ops *ops;
        struct nlattr *slave_data;
        int err;

        master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
        if (!master_dev)
                return 0;
        ops = master_dev->rtnl_link_ops;
        if (!ops)
                return 0;
        if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
                return -EMSGSIZE;
        if (ops->fill_slave_info) {
                slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
                if (!slave_data)
                        return -EMSGSIZE;
                err = ops->fill_slave_info(skb, master_dev, dev);
                if (err < 0)
                        goto err_cancel_slave_data;
                nla_nest_end(skb, slave_data);
        }
        return 0;

err_cancel_slave_data:
        nla_nest_cancel(skb, slave_data);
        return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
                               const struct net_device *dev)
{
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
        struct nlattr *data;
        int err;

        if (!ops)
                return 0;
        if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
                return -EMSGSIZE;
        if (ops->fill_xstats) {
                err = ops->fill_xstats(skb, dev);
                if (err < 0)
                        return err;
        }
        if (ops->fill_info) {
                data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
                if (data == NULL)
                        return -EMSGSIZE;
                err = ops->fill_info(skb, dev);
                if (err < 0)
                        goto err_cancel_data;
                nla_nest_end(skb, data);
        }
        return 0;

err_cancel_data:
        nla_nest_cancel(skb, data);
        return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
        struct nlattr *linkinfo;
        int err = -EMSGSIZE;

        linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
        if (linkinfo == NULL)
                goto out;

        err = rtnl_link_info_fill(skb, dev);
        if (err < 0)
                goto err_cancel_link;

        err = rtnl_link_slave_info_fill(skb, dev);
        if (err < 0)
                goto err_cancel_link;

        nla_nest_end(skb, linkinfo);
        return 0;

err_cancel_link:
        nla_nest_cancel(skb, linkinfo);
out:
        return err;
}
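/*
 * For reference, rtnl_link_fill() above produces a nest of the form below
 * (the IFLA_INFO_DATA/IFLA_INFO_SLAVE_* parts only when the corresponding
 * callbacks exist; the concrete kinds are just an example, e.g. a vlan
 * device enslaved to a bridge):
 *
 *	IFLA_LINKINFO
 *	  IFLA_INFO_KIND        "vlan"
 *	  IFLA_INFO_DATA        <ops->fill_info() attributes>
 *	  IFLA_INFO_SLAVE_KIND  "bridge"
 *	  IFLA_INFO_SLAVE_DATA  <ops->fill_slave_info() attributes>
 */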
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
        struct sock *rtnl = net->rtnl;

        return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
        struct sock *rtnl = net->rtnl;

        return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
                 const struct nlmsghdr *nlh, gfp_t flags)
{
        struct sock *rtnl = net->rtnl;

        nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
        struct sock *rtnl = net->rtnl;

        netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
        struct nlattr *mx;
        int i, valid = 0;

        /* nothing is dumped for dst_default_metrics, so just skip the loop */
        if (metrics == dst_default_metrics.metrics)
                return 0;

        mx = nla_nest_start_noflag(skb, RTA_METRICS);
        if (mx == NULL)
                return -ENOBUFS;

        for (i = 0; i < RTAX_MAX; i++) {
                if (metrics[i]) {
                        if (i == RTAX_CC_ALGO - 1) {
                                char tmp[TCP_CA_NAME_MAX], *name;

                                name = tcp_ca_get_name_by_key(metrics[i], tmp);
                                if (!name)
                                        continue;
                                if (nla_put_string(skb, i + 1, name))
                                        goto nla_put_failure;
                        } else if (i == RTAX_FEATURES - 1) {
                                u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

                                if (!user_features)
                                        continue;
                                BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
                                if (nla_put_u32(skb, i + 1, user_features))
                                        goto nla_put_failure;
                        } else {
                                if (nla_put_u32(skb, i + 1, metrics[i]))
                                        goto nla_put_failure;
                        }
                        valid++;
                }
        }

        if (!valid) {
                nla_nest_cancel(skb, mx);
                return 0;
        }

        return nla_nest_end(skb, mx);

nla_put_failure:
        nla_nest_cancel(skb, mx);
        return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
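/*
 * Note on the "i + 1" above: metrics[] is indexed from 0 while the RTAX_*
 * attribute types start at 1 (RTAX_UNSPEC == 0), so metrics[RTAX_MTU - 1]
 * is emitted as attribute type RTAX_MTU. A route with an MTU metric set
 * thus dumps as:
 *
 *	RTA_METRICS
 *	  RTAX_MTU	1400
 */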
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
                       long expires, u32 error)
{
        struct rta_cacheinfo ci = {
                .rta_error = error,
                .rta_id = id,
        };

        if (dst) {
                ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
                ci.rta_used = dst->__use;
                ci.rta_clntref = rcuref_read(&dst->__rcuref);
        }
        if (expires) {
                unsigned long clock;

                clock = jiffies_to_clock_t(abs(expires));
                clock = min_t(unsigned long, clock, INT_MAX);
                ci.rta_expires = (expires > 0) ? clock : -clock;
        }
        return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

void netdev_set_operstate(struct net_device *dev, int newstate)
{
        unsigned int old = READ_ONCE(dev->operstate);

        do {
                if (old == newstate)
                        return;
        } while (!try_cmpxchg(&dev->operstate, &old, newstate));

        netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
        unsigned char operstate = READ_ONCE(dev->operstate);

        switch (transition) {
        case IF_OPER_UP:
                if ((operstate == IF_OPER_DORMANT ||
                     operstate == IF_OPER_TESTING ||
                     operstate == IF_OPER_UNKNOWN) &&
                    !netif_dormant(dev) && !netif_testing(dev))
                        operstate = IF_OPER_UP;
                break;

        case IF_OPER_TESTING:
                if (netif_oper_up(dev))
                        operstate = IF_OPER_TESTING;
                break;

        case IF_OPER_DORMANT:
                if (netif_oper_up(dev))
                        operstate = IF_OPER_DORMANT;
                break;
        }

        netdev_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
        return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
               (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
                                           const struct ifinfomsg *ifm)
{
        unsigned int flags = ifm->ifi_flags;

        /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
        if (ifm->ifi_change)
                flags = (flags & ifm->ifi_change) |
                        (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

        return flags;
}
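/*
 * Worked example for rtnl_dev_combine_flags(): with ifi_flags = IFF_UP and
 * ifi_change = IFF_UP, only the IFF_UP bit is taken from the request and
 * every other flag keeps its current device value. ifi_change == 0 is the
 * legacy "change everything" case noted above.
 */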
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
                                 const struct rtnl_link_stats64 *b)
{
        a->rx_packets = b->rx_packets;
        a->tx_packets = b->tx_packets;
        a->rx_bytes = b->rx_bytes;
        a->tx_bytes = b->tx_bytes;
        a->rx_errors = b->rx_errors;
        a->tx_errors = b->tx_errors;
        a->rx_dropped = b->rx_dropped;
        a->tx_dropped = b->tx_dropped;

        a->multicast = b->multicast;
        a->collisions = b->collisions;

        a->rx_length_errors = b->rx_length_errors;
        a->rx_over_errors = b->rx_over_errors;
        a->rx_crc_errors = b->rx_crc_errors;
        a->rx_frame_errors = b->rx_frame_errors;
        a->rx_fifo_errors = b->rx_fifo_errors;
        a->rx_missed_errors = b->rx_missed_errors;

        a->tx_aborted_errors = b->tx_aborted_errors;
        a->tx_carrier_errors = b->tx_carrier_errors;
        a->tx_fifo_errors = b->tx_fifo_errors;
        a->tx_heartbeat_errors = b->tx_heartbeat_errors;
        a->tx_window_errors = b->tx_window_errors;

        a->rx_compressed = b->rx_compressed;
        a->tx_compressed = b->tx_compressed;

        a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
                                   u32 ext_filter_mask)
{
        if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
                int num_vfs = dev_num_vf(dev->dev.parent);
                size_t size = nla_total_size(0);

                size += num_vfs *
                        (nla_total_size(0) +
                         nla_total_size(sizeof(struct ifla_vf_mac)) +
                         nla_total_size(sizeof(struct ifla_vf_broadcast)) +
                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
                         nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
                         nla_total_size(MAX_VLAN_LIST_LEN *
                                        sizeof(struct ifla_vf_vlan_info)) +
                         nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
                         nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
                         nla_total_size(sizeof(struct ifla_vf_rate)) +
                         nla_total_size(sizeof(struct ifla_vf_link_state)) +
                         nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
                         nla_total_size(sizeof(struct ifla_vf_trust)));
                if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
                        size += num_vfs *
                                (nla_total_size(0) + /* nest IFLA_VF_STATS */
                                 /* IFLA_VF_STATS_RX_PACKETS */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_TX_PACKETS */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_RX_BYTES */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_TX_BYTES */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_BROADCAST */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_MULTICAST */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_RX_DROPPED */
                                 nla_total_size_64bit(sizeof(__u64)) +
                                 /* IFLA_VF_STATS_TX_DROPPED */
                                 nla_total_size_64bit(sizeof(__u64)));
                }
                return size;
        } else
                return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
                             u32 ext_filter_mask)
{
        size_t port_size = nla_total_size(4)            /* PORT_VF */
                + nla_total_size(PORT_PROFILE_MAX)      /* PORT_PROFILE */
                + nla_total_size(PORT_UUID_MAX)         /* PORT_INSTANCE_UUID */
                + nla_total_size(PORT_UUID_MAX)         /* PORT_HOST_UUID */
                + nla_total_size(1)                     /* PORT_VDP_REQUEST */
                + nla_total_size(2);                    /* PORT_VDP_RESPONSE */
        size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
        size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
                + port_size;
        size_t port_self_size = nla_total_size(sizeof(struct nlattr))
                + port_size;

        if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
            !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;
        if (dev_num_vf(dev->dev.parent))
                return port_self_size + vf_ports_size +
                       vf_port_size * dev_num_vf(dev->dev.parent);
        else
                return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
        size_t xdp_size = nla_total_size(0) +   /* nest IFLA_XDP */
                          nla_total_size(1) +   /* XDP_ATTACHED */
                          nla_total_size(4) +   /* XDP_PROG_ID (or 1st mode) */
                          nla_total_size(4);    /* XDP_<mode>_PROG_ID */

        return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
        struct netdev_name_node *name_node;
        unsigned int cnt = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
                cnt++;
        rcu_read_unlock();

        if (!cnt)
                return 0;

        return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
        size_t size = nla_total_size(1);

        if (dev->proto_down_reason)
                size += nla_total_size(0) + nla_total_size(4);

        return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
        size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

        if (dev->devlink_port)
                size += devlink_nl_port_handle_size(dev->devlink_port);

        return size;
}

static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
        size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */

        size += dpll_netdev_pin_handle_size(dev);

        return size;
}
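/*
 * The helpers above (and if_nlmsg_size() below) compute a worst-case
 * message size: nla_total_size(n) is the NLA_ALIGN'ed payload size plus
 * the attribute header, so e.g. nla_total_size(1) == 8 on the 4-byte
 * aligned netlink stream. Summing one term per attribute gives a safe
 * allocation for nlmsg_new() even when every optional attribute ends up
 * being emitted.
 */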
static noinline size_t if_nlmsg_size(const struct net_device *dev,
                                     u32 ext_filter_mask)
{
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
               + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
               + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
               + nla_total_size(sizeof(struct rtnl_link_stats))
               + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
               + nla_total_size(4) /* IFLA_TXQLEN */
               + nla_total_size(4) /* IFLA_WEIGHT */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(1) /* IFLA_CARRIER */
               + nla_total_size(4) /* IFLA_PROMISCUITY */
               + nla_total_size(4) /* IFLA_ALLMULTI */
               + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
               + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
               + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
               + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
               + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
               + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
               + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
               + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
               + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
               + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(1) /* IFLA_LINKMODE */
               + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
               + nla_total_size(4) /* IFLA_LINK_NETNSID */
               + nla_total_size(4) /* IFLA_GROUP */
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
               + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
               + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
               + rtnl_xdp_size() /* IFLA_XDP */
               + nla_total_size(4) /* IFLA_EVENT */
               + nla_total_size(4) /* IFLA_NEW_NETNSID */
               + nla_total_size(4) /* IFLA_NEW_IFINDEX */
               + rtnl_proto_down_size(dev) /* proto down */
               + nla_total_size(4) /* IFLA_TARGET_NETNSID */
               + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
               + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
               + nla_total_size(4) /* IFLA_MIN_MTU */
               + nla_total_size(4) /* IFLA_MAX_MTU */
               + rtnl_prop_list_size(dev)
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
               + rtnl_devlink_port_size(dev)
               + rtnl_dpll_pin_size(dev)
               + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
        struct nlattr *vf_ports;
        struct nlattr *vf_port;
        int vf;
        int err;

        vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
        if (!vf_ports)
                return -EMSGSIZE;

        for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
                vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
                if (!vf_port)
                        goto nla_put_failure;
                if (nla_put_u32(skb, IFLA_PORT_VF, vf))
                        goto nla_put_failure;
                err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
                if (err == -EMSGSIZE)
                        goto nla_put_failure;
                if (err) {
                        nla_nest_cancel(skb, vf_port);
                        continue;
                }
                nla_nest_end(skb, vf_port);
        }

        nla_nest_end(skb, vf_ports);

        return 0;

nla_put_failure:
        nla_nest_cancel(skb, vf_ports);
        return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
        struct nlattr *port_self;
        int err;

        port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
        if (!port_self)
                return -EMSGSIZE;

        err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
        if (err) {
                nla_nest_cancel(skb, port_self);
                return (err == -EMSGSIZE) ? err : 0;
        }

        nla_nest_end(skb, port_self);

        return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
                          u32 ext_filter_mask)
{
        int err;

        if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
            !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;

        err = rtnl_port_self_fill(skb, dev);
        if (err)
                return err;

        if (dev_num_vf(dev->dev.parent)) {
                err = rtnl_vf_ports_fill(skb, dev);
                if (err)
                        return err;
        }

        return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
        int err;
        struct netdev_phys_item_id ppid;

        err = dev_get_phys_port_id(dev, &ppid);
        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }

        if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
                return -EMSGSIZE;

        return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
        char name[IFNAMSIZ];
        int err;

        err = dev_get_phys_port_name(dev, name, sizeof(name));
        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }

        if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
                return -EMSGSIZE;

        return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_phys_item_id ppid = { };
        int err;

        err = dev_get_port_parent_id(dev, &ppid, false);
        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }

        if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
                return -EMSGSIZE;

        return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
                                              struct net_device *dev)
{
        struct rtnl_link_stats64 *sp;
        struct nlattr *attr;

        attr = nla_reserve_64bit(skb, IFLA_STATS64,
                                 sizeof(struct rtnl_link_stats64), IFLA_PAD);
        if (!attr)
                return -EMSGSIZE;

        sp = nla_data(attr);
        dev_get_stats(dev, sp);

        attr = nla_reserve(skb, IFLA_STATS,
                           sizeof(struct rtnl_link_stats));
        if (!attr)
                return -EMSGSIZE;

        copy_rtnl_link_stats(nla_data(attr), sp);

        return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
                                               struct net_device *dev,
                                               int vfs_num,
                                               u32 ext_filter_mask)
{
        struct ifla_vf_rss_query_en vf_rss_query_en;
        struct nlattr *vf, *vfstats, *vfvlanlist;
        struct ifla_vf_link_state vf_linkstate;
        struct ifla_vf_vlan_info vf_vlan_info;
        struct ifla_vf_spoofchk vf_spoofchk;
        struct ifla_vf_tx_rate vf_tx_rate;
        struct ifla_vf_stats vf_stats;
        struct ifla_vf_trust vf_trust;
        struct ifla_vf_vlan vf_vlan;
        struct ifla_vf_rate vf_rate;
        struct ifla_vf_mac vf_mac;
        struct ifla_vf_broadcast vf_broadcast;
        struct ifla_vf_info ivi;
        struct ifla_vf_guid node_guid;
        struct ifla_vf_guid port_guid;

        memset(&ivi, 0, sizeof(ivi));

        /* Not all SR-IOV capable drivers support the
         * spoofcheck and "RSS query enable" query. Preset to
         * -1 so the user space tool can detect that the driver
         * didn't report anything.
         */
        ivi.spoofchk = -1;
        ivi.rss_query_en = -1;
        ivi.trusted = -1;
        /* The default value for VF link state is "auto"
         * IFLA_VF_LINK_STATE_AUTO which equals zero
         */
        ivi.linkstate = 0;
        /* VLAN Protocol by default is 802.1Q */
        ivi.vlan_proto = htons(ETH_P_8021Q);
        if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
                return 0;

        memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
        memset(&node_guid, 0, sizeof(node_guid));
        memset(&port_guid, 0, sizeof(port_guid));

        vf_mac.vf =
                vf_vlan.vf =
                vf_vlan_info.vf =
                vf_rate.vf =
                vf_tx_rate.vf =
                vf_spoofchk.vf =
                vf_linkstate.vf =
                vf_rss_query_en.vf =
                vf_trust.vf =
                node_guid.vf =
                port_guid.vf = ivi.vf;

        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
        memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
        vf_vlan.vlan = ivi.vlan;
        vf_vlan.qos = ivi.qos;
        vf_vlan_info.vlan = ivi.vlan;
        vf_vlan_info.qos = ivi.qos;
        vf_vlan_info.vlan_proto = ivi.vlan_proto;
        vf_tx_rate.rate = ivi.max_tx_rate;
        vf_rate.min_tx_rate = ivi.min_tx_rate;
        vf_rate.max_tx_rate = ivi.max_tx_rate;
        vf_spoofchk.setting = ivi.spoofchk;
        vf_linkstate.link_state = ivi.linkstate;
        vf_rss_query_en.setting = ivi.rss_query_en;
        vf_trust.setting = ivi.trusted;
        vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
        if (!vf)
                return -EMSGSIZE;
        if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
            nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
            nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
            nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
                    &vf_rate) ||
            nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                    &vf_tx_rate) ||
            nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
                    &vf_spoofchk) ||
            nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
                    &vf_linkstate) ||
            nla_put(skb, IFLA_VF_RSS_QUERY_EN,
                    sizeof(vf_rss_query_en),
                    &vf_rss_query_en) ||
            nla_put(skb, IFLA_VF_TRUST,
                    sizeof(vf_trust), &vf_trust))
                goto nla_put_vf_failure;

        if (dev->netdev_ops->ndo_get_vf_guid &&
            !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
                                              &port_guid)) {
                if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
                            &node_guid) ||
                    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
                            &port_guid))
                        goto nla_put_vf_failure;
        }
        vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
        if (!vfvlanlist)
                goto nla_put_vf_failure;
        if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
                    &vf_vlan_info)) {
                nla_nest_cancel(skb, vfvlanlist);
                goto nla_put_vf_failure;
        }
        nla_nest_end(skb, vfvlanlist);
        if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
                memset(&vf_stats, 0, sizeof(vf_stats));
                if (dev->netdev_ops->ndo_get_vf_stats)
                        dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
                                                          &vf_stats);
                vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
                if (!vfstats)
                        goto nla_put_vf_failure;
                if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
                                      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
                                      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
                                      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
                                      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
                                      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
                                      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
                                      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
                    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
                                      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
                        nla_nest_cancel(skb, vfstats);
                        goto nla_put_vf_failure;
                }
                nla_nest_end(skb, vfstats);
        }
        nla_nest_end(skb, vf);
        return 0;

nla_put_vf_failure:
        nla_nest_cancel(skb, vf);
        return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
                                           struct net_device *dev,
                                           u32 ext_filter_mask)
{
        struct nlattr *vfinfo;
        int i, num_vfs;

        if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
                return 0;

        num_vfs = dev_num_vf(dev->dev.parent);
        if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
                return -EMSGSIZE;

        if (!dev->netdev_ops->ndo_get_vf_config)
                return 0;

        vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
        if (!vfinfo)
                return -EMSGSIZE;

        for (i = 0; i < num_vfs; i++) {
                if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
                        nla_nest_cancel(skb, vfinfo);
                        return -EMSGSIZE;
                }
        }

        nla_nest_end(skb, vfinfo);
        return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb,
                                const struct net_device *dev)
{
        struct rtnl_link_ifmap map;

        memset(&map, 0, sizeof(map));
        map.mem_start = READ_ONCE(dev->mem_start);
        map.mem_end = READ_ONCE(dev->mem_end);
        map.base_addr = READ_ONCE(dev->base_addr);
        map.irq = READ_ONCE(dev->irq);
        map.dma = READ_ONCE(dev->dma);
        map.port = READ_ONCE(dev->if_port);

        if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
                return -EMSGSIZE;

        return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
        const struct bpf_prog *generic_xdp_prog;

        ASSERT_RTNL();

        generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
        if (!generic_xdp_prog)
                return 0;
        return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
        return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
        return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
                               u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
                               u32 (*get_prog_id)(struct net_device *dev))
{
        u32 curr_id;
        int err;

        curr_id = get_prog_id(dev);
        if (!curr_id)
                return 0;

        *prog_id = curr_id;
        err = nla_put_u32(skb, attr, curr_id);
        if (err)
                return err;

        if (*mode != XDP_ATTACHED_NONE)
                *mode = XDP_ATTACHED_MULTI;
        else
                *mode = tgt_mode;

        return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
        struct nlattr *xdp;
        u32 prog_id;
        int err;
        u8 mode;

        xdp = nla_nest_start_noflag(skb, IFLA_XDP);
        if (!xdp)
                return -EMSGSIZE;

        prog_id = 0;
        mode = XDP_ATTACHED_NONE;
        err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
                                  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
        if (err)
                goto err_cancel;
        err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
                                  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
        if (err)
                goto err_cancel;
        err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
                                  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
        if (err)
                goto err_cancel;

        err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
        if (err)
                goto err_cancel;

        if (prog_id && mode != XDP_ATTACHED_MULTI) {
                err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
                if (err)
                        goto err_cancel;
        }

        nla_nest_end(skb, xdp);
        return 0;

err_cancel:
        nla_nest_cancel(skb, xdp);
        return err;
}
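/*
 * Reporting semantics of rtnl_xdp_fill() above: with a single program
 * attached, IFLA_XDP_ATTACHED carries its mode and IFLA_XDP_PROG_ID its
 * id. With programs attached in several modes at once, IFLA_XDP_ATTACHED
 * is XDP_ATTACHED_MULTI, IFLA_XDP_PROG_ID is omitted, and user space must
 * look at the per-mode IFLA_XDP_{SKB,DRV,HW}_PROG_ID attributes instead.
 */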
nla_put_string(skb, IFLA_IFALIAS, buf) : 0; 1631 } 1632 1633 static int rtnl_fill_link_netnsid(struct sk_buff *skb, 1634 const struct net_device *dev, 1635 struct net *src_net, gfp_t gfp) 1636 { 1637 bool put_iflink = false; 1638 1639 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { 1640 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1641 1642 if (!net_eq(dev_net(dev), link_net)) { 1643 int id = peernet2id_alloc(src_net, link_net, gfp); 1644 1645 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1646 return -EMSGSIZE; 1647 1648 put_iflink = true; 1649 } 1650 } 1651 1652 return nla_put_iflink(skb, dev, put_iflink); 1653 } 1654 1655 static int rtnl_fill_link_af(struct sk_buff *skb, 1656 const struct net_device *dev, 1657 u32 ext_filter_mask) 1658 { 1659 const struct rtnl_af_ops *af_ops; 1660 struct nlattr *af_spec; 1661 1662 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 1663 if (!af_spec) 1664 return -EMSGSIZE; 1665 1666 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 1667 struct nlattr *af; 1668 int err; 1669 1670 if (!af_ops->fill_link_af) 1671 continue; 1672 1673 af = nla_nest_start_noflag(skb, af_ops->family); 1674 if (!af) 1675 return -EMSGSIZE; 1676 1677 err = af_ops->fill_link_af(skb, dev, ext_filter_mask); 1678 /* 1679 * Caller may return ENODATA to indicate that there 1680 * was no data to be dumped. This is not an error, it 1681 * means we should trim the attribute header and 1682 * continue. 1683 */ 1684 if (err == -ENODATA) 1685 nla_nest_cancel(skb, af); 1686 else if (err < 0) 1687 return -EMSGSIZE; 1688 1689 nla_nest_end(skb, af); 1690 } 1691 1692 nla_nest_end(skb, af_spec); 1693 return 0; 1694 } 1695 1696 static int rtnl_fill_alt_ifnames(struct sk_buff *skb, 1697 const struct net_device *dev) 1698 { 1699 struct netdev_name_node *name_node; 1700 int count = 0; 1701 1702 list_for_each_entry_rcu(name_node, &dev->name_node->list, list) { 1703 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name)) 1704 return -EMSGSIZE; 1705 count++; 1706 } 1707 return count; 1708 } 1709 1710 /* RCU protected. 
*/ 1711 static int rtnl_fill_prop_list(struct sk_buff *skb, 1712 const struct net_device *dev) 1713 { 1714 struct nlattr *prop_list; 1715 int ret; 1716 1717 prop_list = nla_nest_start(skb, IFLA_PROP_LIST); 1718 if (!prop_list) 1719 return -EMSGSIZE; 1720 1721 ret = rtnl_fill_alt_ifnames(skb, dev); 1722 if (ret <= 0) 1723 goto nest_cancel; 1724 1725 nla_nest_end(skb, prop_list); 1726 return 0; 1727 1728 nest_cancel: 1729 nla_nest_cancel(skb, prop_list); 1730 return ret; 1731 } 1732 1733 static int rtnl_fill_proto_down(struct sk_buff *skb, 1734 const struct net_device *dev) 1735 { 1736 struct nlattr *pr; 1737 u32 preason; 1738 1739 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1740 goto nla_put_failure; 1741 1742 preason = dev->proto_down_reason; 1743 if (!preason) 1744 return 0; 1745 1746 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON); 1747 if (!pr) 1748 return -EMSGSIZE; 1749 1750 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) { 1751 nla_nest_cancel(skb, pr); 1752 goto nla_put_failure; 1753 } 1754 1755 nla_nest_end(skb, pr); 1756 return 0; 1757 1758 nla_put_failure: 1759 return -EMSGSIZE; 1760 } 1761 1762 static int rtnl_fill_devlink_port(struct sk_buff *skb, 1763 const struct net_device *dev) 1764 { 1765 struct nlattr *devlink_port_nest; 1766 int ret; 1767 1768 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT); 1769 if (!devlink_port_nest) 1770 return -EMSGSIZE; 1771 1772 if (dev->devlink_port) { 1773 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port); 1774 if (ret < 0) 1775 goto nest_cancel; 1776 } 1777 1778 nla_nest_end(skb, devlink_port_nest); 1779 return 0; 1780 1781 nest_cancel: 1782 nla_nest_cancel(skb, devlink_port_nest); 1783 return ret; 1784 } 1785 1786 static int rtnl_fill_dpll_pin(struct sk_buff *skb, 1787 const struct net_device *dev) 1788 { 1789 struct nlattr *dpll_pin_nest; 1790 int ret; 1791 1792 dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN); 1793 if (!dpll_pin_nest) 1794 return -EMSGSIZE; 1795 1796 ret = dpll_netdev_add_pin_handle(skb, dev); 1797 if (ret < 0) 1798 goto nest_cancel; 1799 1800 nla_nest_end(skb, dpll_pin_nest); 1801 return 0; 1802 1803 nest_cancel: 1804 nla_nest_cancel(skb, dpll_pin_nest); 1805 return ret; 1806 } 1807 1808 static int rtnl_fill_ifinfo(struct sk_buff *skb, 1809 struct net_device *dev, struct net *src_net, 1810 int type, u32 pid, u32 seq, u32 change, 1811 unsigned int flags, u32 ext_filter_mask, 1812 u32 event, int *new_nsid, int new_ifindex, 1813 int tgt_netnsid, gfp_t gfp) 1814 { 1815 struct ifinfomsg *ifm; 1816 struct nlmsghdr *nlh; 1817 struct Qdisc *qdisc; 1818 1819 ASSERT_RTNL(); 1820 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 1821 if (nlh == NULL) 1822 return -EMSGSIZE; 1823 1824 ifm = nlmsg_data(nlh); 1825 ifm->ifi_family = AF_UNSPEC; 1826 ifm->__ifi_pad = 0; 1827 ifm->ifi_type = dev->type; 1828 ifm->ifi_index = dev->ifindex; 1829 ifm->ifi_flags = dev_get_flags(dev); 1830 ifm->ifi_change = change; 1831 1832 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) 1833 goto nla_put_failure; 1834 1835 qdisc = rtnl_dereference(dev->qdisc); 1836 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 1837 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || 1838 nla_put_u8(skb, IFLA_OPERSTATE, 1839 netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN) || 1840 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || 1841 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 1842 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) || 1843 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || 1844 nla_put_u32(skb, IFLA_GROUP, dev->group) || 1845 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || 1846 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) || 1847 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || 1848 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || 1849 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || 1850 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) || 1851 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) || 1852 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) || 1853 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) || 1854 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) || 1855 #ifdef CONFIG_RPS 1856 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1857 #endif 1858 put_master_ifindex(skb, dev) || 1859 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1860 (qdisc && 1861 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || 1862 nla_put_ifalias(skb, dev) || 1863 nla_put_u32(skb, IFLA_CARRIER_CHANGES, 1864 atomic_read(&dev->carrier_up_count) + 1865 atomic_read(&dev->carrier_down_count)) || 1866 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, 1867 atomic_read(&dev->carrier_up_count)) || 1868 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, 1869 atomic_read(&dev->carrier_down_count))) 1870 goto nla_put_failure; 1871 1872 if (rtnl_fill_proto_down(skb, dev)) 1873 goto nla_put_failure; 1874 1875 if (event != IFLA_EVENT_NONE) { 1876 if (nla_put_u32(skb, IFLA_EVENT, event)) 1877 goto nla_put_failure; 1878 } 1879 1880 if (dev->addr_len) { 1881 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1882 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) 1883 goto nla_put_failure; 1884 } 1885 1886 if (rtnl_phys_port_id_fill(skb, dev)) 1887 goto nla_put_failure; 1888 1889 if (rtnl_phys_port_name_fill(skb, dev)) 1890 goto nla_put_failure; 1891 1892 if (rtnl_phys_switch_id_fill(skb, dev)) 1893 goto nla_put_failure; 1894 1895 if (rtnl_fill_stats(skb, dev)) 1896 goto nla_put_failure; 1897 1898 if (rtnl_fill_vf(skb, dev, ext_filter_mask)) 1899 goto nla_put_failure; 1900 1901 if (rtnl_port_fill(skb, dev, ext_filter_mask)) 1902 goto nla_put_failure; 1903 1904 if (rtnl_xdp_fill(skb, dev)) 1905 goto nla_put_failure; 1906 1907 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { 1908 if (rtnl_link_fill(skb, dev) < 0) 1909 goto nla_put_failure; 1910 } 1911 1912 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp)) 1913 goto nla_put_failure; 1914 1915 if (new_nsid && 1916 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0) 1917 goto nla_put_failure; 1918 if (new_ifindex && 1919 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0) 1920 goto nla_put_failure; 1921 1922 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) && 1923 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr)) 1924 goto nla_put_failure; 1925 1926 rcu_read_lock(); 1927 if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) 1928 goto nla_put_failure_rcu; 1929 if (rtnl_fill_link_ifmap(skb, dev)) 1930 goto nla_put_failure_rcu; 1931 if (rtnl_fill_prop_list(skb, dev)) 1932 goto nla_put_failure_rcu; 1933 rcu_read_unlock(); 1934 1935 if (dev->dev.parent && 1936 nla_put_string(skb, IFLA_PARENT_DEV_NAME, 1937 dev_name(dev->dev.parent))) 1938 goto nla_put_failure; 1939 1940 if 
(dev->dev.parent && dev->dev.parent->bus && 1941 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME, 1942 dev->dev.parent->bus->name)) 1943 goto nla_put_failure; 1944 1945 if (rtnl_fill_devlink_port(skb, dev)) 1946 goto nla_put_failure; 1947 1948 if (rtnl_fill_dpll_pin(skb, dev)) 1949 goto nla_put_failure; 1950 1951 nlmsg_end(skb, nlh); 1952 return 0; 1953 1954 nla_put_failure_rcu: 1955 rcu_read_unlock(); 1956 nla_put_failure: 1957 nlmsg_cancel(skb, nlh); 1958 return -EMSGSIZE; 1959 } 1960 1961 static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 1962 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 1963 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1964 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1965 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 1966 [IFLA_MTU] = { .type = NLA_U32 }, 1967 [IFLA_LINK] = { .type = NLA_U32 }, 1968 [IFLA_MASTER] = { .type = NLA_U32 }, 1969 [IFLA_CARRIER] = { .type = NLA_U8 }, 1970 [IFLA_TXQLEN] = { .type = NLA_U32 }, 1971 [IFLA_WEIGHT] = { .type = NLA_U32 }, 1972 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 1973 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1974 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1975 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1976 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1977 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to 1978 * allow 0-length string (needed to remove an alias). 1979 */ 1980 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, 1981 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1982 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1983 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1984 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1985 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1986 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 1987 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 1988 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 1989 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 1990 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, 1991 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1992 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 1993 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1994 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1995 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1996 [IFLA_XDP] = { .type = NLA_NESTED }, 1997 [IFLA_EVENT] = { .type = NLA_U32 }, 1998 [IFLA_GROUP] = { .type = NLA_U32 }, 1999 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 2000 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 2001 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 2002 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 2003 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 2004 [IFLA_PROP_LIST] = { .type = NLA_NESTED }, 2005 [IFLA_ALT_IFNAME] = { .type = NLA_STRING, 2006 .len = ALTIFNAMSIZ - 1 }, 2007 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, 2008 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, 2009 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 2010 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, 2011 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, 2012 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, 2013 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, 2014 [IFLA_ALLMULTI] = { .type = NLA_REJECT }, 2015 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2016 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2017 }; 2018 2019 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 2020 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 2021 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 2022 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 2023 
[IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 2024 }; 2025 2026 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 2027 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 2028 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 2029 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 2030 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 2031 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 2032 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 2033 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 2034 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 2035 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 2036 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2037 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2038 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2039 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2040 }; 2041 2042 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2043 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2044 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2045 .len = PORT_PROFILE_MAX }, 2046 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2047 .len = PORT_UUID_MAX }, 2048 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2049 .len = PORT_UUID_MAX }, 2050 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2051 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2052 2053 /* Unused, but we need to keep it here since user space could 2054 * fill it. It's also broken with regard to NLA_BINARY use in 2055 * combination with structs. 2056 */ 2057 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2058 .len = sizeof(struct ifla_port_vsi) }, 2059 }; 2060 2061 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2062 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2063 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2064 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2065 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2066 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2067 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2068 }; 2069 2070 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2071 { 2072 const struct rtnl_link_ops *ops = NULL; 2073 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2074 2075 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2076 return NULL; 2077 2078 if (linfo[IFLA_INFO_KIND]) { 2079 char kind[MODULE_NAME_LEN]; 2080 2081 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2082 ops = rtnl_link_ops_get(kind); 2083 } 2084 2085 return ops; 2086 } 2087 2088 static bool link_master_filtered(struct net_device *dev, int master_idx) 2089 { 2090 struct net_device *master; 2091 2092 if (!master_idx) 2093 return false; 2094 2095 master = netdev_master_upper_dev_get(dev); 2096 2097 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2098 * another invalid value for ifindex to denote "no master". 
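* IFLA_MASTER == -1 serves as that value: it restricts the dump to devices that have no master, while any other non-zero ifindex keeps only devices enslaved to that exact master.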
2099 */ 2100 if (master_idx == -1) 2101 return !!master; 2102 2103 if (!master || master->ifindex != master_idx) 2104 return true; 2105 2106 return false; 2107 } 2108 2109 static bool link_kind_filtered(const struct net_device *dev, 2110 const struct rtnl_link_ops *kind_ops) 2111 { 2112 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2113 return true; 2114 2115 return false; 2116 } 2117 2118 static bool link_dump_filtered(struct net_device *dev, 2119 int master_idx, 2120 const struct rtnl_link_ops *kind_ops) 2121 { 2122 if (link_master_filtered(dev, master_idx) || 2123 link_kind_filtered(dev, kind_ops)) 2124 return true; 2125 2126 return false; 2127 } 2128 2129 /** 2130 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2131 * @sk: netlink socket 2132 * @netnsid: network namespace identifier 2133 * 2134 * Returns the network namespace identified by netnsid on success or an error 2135 * pointer on failure. 2136 */ 2137 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2138 { 2139 struct net *net; 2140 2141 net = get_net_ns_by_id(sock_net(sk), netnsid); 2142 if (!net) 2143 return ERR_PTR(-EINVAL); 2144 2145 /* For now, the caller is required to have CAP_NET_ADMIN in 2146 * the user namespace owning the target net ns. 2147 */ 2148 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2149 put_net(net); 2150 return ERR_PTR(-EACCES); 2151 } 2152 return net; 2153 } 2154 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2155 2156 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2157 bool strict_check, struct nlattr **tb, 2158 struct netlink_ext_ack *extack) 2159 { 2160 int hdrlen; 2161 2162 if (strict_check) { 2163 struct ifinfomsg *ifm; 2164 2165 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2166 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2167 return -EINVAL; 2168 } 2169 2170 ifm = nlmsg_data(nlh); 2171 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2172 ifm->ifi_change) { 2173 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2174 return -EINVAL; 2175 } 2176 if (ifm->ifi_index) { 2177 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2178 return -EINVAL; 2179 } 2180 2181 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2182 IFLA_MAX, ifla_policy, 2183 extack); 2184 } 2185 2186 /* A hack to preserve kernel<->userspace interface. 2187 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2188 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2189 * what iproute2 < v3.9.0 used. 2190 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2191 * attribute, its netlink message is shorter than struct ifinfomsg. 2192 */ 2193 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 
2194 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2195 2196 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2197 extack); 2198 } 2199 2200 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2201 { 2202 const struct rtnl_link_ops *kind_ops = NULL; 2203 struct netlink_ext_ack *extack = cb->extack; 2204 const struct nlmsghdr *nlh = cb->nlh; 2205 struct net *net = sock_net(skb->sk); 2206 unsigned int flags = NLM_F_MULTI; 2207 struct nlattr *tb[IFLA_MAX+1]; 2208 struct { 2209 unsigned long ifindex; 2210 } *ctx = (void *)cb->ctx; 2211 struct net *tgt_net = net; 2212 u32 ext_filter_mask = 0; 2213 struct net_device *dev; 2214 int master_idx = 0; 2215 int netnsid = -1; 2216 int err, i; 2217 2218 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2219 if (err < 0) { 2220 if (cb->strict_check) 2221 return err; 2222 2223 goto walk_entries; 2224 } 2225 2226 for (i = 0; i <= IFLA_MAX; ++i) { 2227 if (!tb[i]) 2228 continue; 2229 2230 /* new attributes should only be added with strict checking */ 2231 switch (i) { 2232 case IFLA_TARGET_NETNSID: 2233 netnsid = nla_get_s32(tb[i]); 2234 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2235 if (IS_ERR(tgt_net)) { 2236 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2237 return PTR_ERR(tgt_net); 2238 } 2239 break; 2240 case IFLA_EXT_MASK: 2241 ext_filter_mask = nla_get_u32(tb[i]); 2242 break; 2243 case IFLA_MASTER: 2244 master_idx = nla_get_u32(tb[i]); 2245 break; 2246 case IFLA_LINKINFO: 2247 kind_ops = linkinfo_to_kind_ops(tb[i]); 2248 break; 2249 default: 2250 if (cb->strict_check) { 2251 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2252 return -EINVAL; 2253 } 2254 } 2255 } 2256 2257 if (master_idx || kind_ops) 2258 flags |= NLM_F_DUMP_FILTERED; 2259 2260 walk_entries: 2261 err = 0; 2262 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { 2263 if (link_dump_filtered(dev, master_idx, kind_ops)) 2264 continue; 2265 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK, 2266 NETLINK_CB(cb->skb).portid, 2267 nlh->nlmsg_seq, 0, flags, 2268 ext_filter_mask, 0, NULL, 0, 2269 netnsid, GFP_KERNEL); 2270 if (err < 0) 2271 break; 2272 } 2273 cb->seq = tgt_net->dev_base_seq; 2274 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2275 if (netnsid >= 0) 2276 put_net(tgt_net); 2277 2278 return err; 2279 } 2280 2281 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 2282 struct netlink_ext_ack *exterr) 2283 { 2284 const struct ifinfomsg *ifmp; 2285 const struct nlattr *attrs; 2286 size_t len; 2287 2288 ifmp = nla_data(nla_peer); 2289 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); 2290 len = nla_len(nla_peer) - sizeof(struct ifinfomsg); 2291 2292 if (ifmp->ifi_index < 0) { 2293 NL_SET_ERR_MSG_ATTR(exterr, nla_peer, 2294 "ifindex can't be negative"); 2295 return -EINVAL; 2296 } 2297 2298 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, 2299 exterr); 2300 } 2301 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); 2302 2303 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2304 { 2305 struct net *net; 2306 /* Examine the link attributes and figure out which 2307 * network namespace we are talking about. 
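* IFLA_NET_NS_PID is checked first and therefore takes precedence over IFLA_NET_NS_FD; when neither attribute is present, the source namespace itself is returned with an extra reference held.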
2308 */ 2309 if (tb[IFLA_NET_NS_PID]) 2310 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2311 else if (tb[IFLA_NET_NS_FD]) 2312 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2313 else 2314 net = get_net(src_net); 2315 return net; 2316 } 2317 EXPORT_SYMBOL(rtnl_link_get_net); 2318 2319 /* Figure out which network namespace we are talking about by 2320 * examining the link attributes in the following order: 2321 * 2322 * 1. IFLA_NET_NS_PID 2323 * 2. IFLA_NET_NS_FD 2324 * 3. IFLA_TARGET_NETNSID 2325 */ 2326 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2327 struct nlattr *tb[]) 2328 { 2329 struct net *net; 2330 2331 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2332 return rtnl_link_get_net(src_net, tb); 2333 2334 if (!tb[IFLA_TARGET_NETNSID]) 2335 return get_net(src_net); 2336 2337 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2338 if (!net) 2339 return ERR_PTR(-EINVAL); 2340 2341 return net; 2342 } 2343 2344 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2345 struct net *src_net, 2346 struct nlattr *tb[], int cap) 2347 { 2348 struct net *net; 2349 2350 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2351 if (IS_ERR(net)) 2352 return net; 2353 2354 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2355 put_net(net); 2356 return ERR_PTR(-EPERM); 2357 } 2358 2359 return net; 2360 } 2361 2362 /* Verify that rtnetlink requests do not pass additional properties 2363 * potentially referring to different network namespaces. 2364 */ 2365 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2366 struct netlink_ext_ack *extack, 2367 bool netns_id_only) 2368 { 2369 2370 if (netns_id_only) { 2371 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2372 return 0; 2373 2374 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2375 return -EOPNOTSUPP; 2376 } 2377 2378 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2379 goto invalid_attr; 2380 2381 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2382 goto invalid_attr; 2383 2384 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2385 goto invalid_attr; 2386 2387 return 0; 2388 2389 invalid_attr: 2390 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2391 return -EINVAL; 2392 } 2393 2394 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2395 int max_tx_rate) 2396 { 2397 const struct net_device_ops *ops = dev->netdev_ops; 2398 2399 if (!ops->ndo_set_vf_rate) 2400 return -EOPNOTSUPP; 2401 if (max_tx_rate && max_tx_rate < min_tx_rate) 2402 return -EINVAL; 2403 2404 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2405 } 2406 2407 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2408 struct netlink_ext_ack *extack) 2409 { 2410 if (tb[IFLA_ADDRESS] && 2411 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2412 return -EINVAL; 2413 2414 if (tb[IFLA_BROADCAST] && 2415 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2416 return -EINVAL; 2417 2418 if (tb[IFLA_GSO_MAX_SIZE] && 2419 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { 2420 NL_SET_ERR_MSG(extack, "too big gso_max_size"); 2421 return -EINVAL; 2422 } 2423 2424 if (tb[IFLA_GSO_MAX_SEGS] && 2425 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || 2426 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { 2427 NL_SET_ERR_MSG(extack, "too big gso_max_segs"); 2428 return -EINVAL; 2429 } 2430 2431 if (tb[IFLA_GRO_MAX_SIZE] && 
2432 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { 2433 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2434 return -EINVAL; 2435 } 2436 2437 if (tb[IFLA_GSO_IPV4_MAX_SIZE] && 2438 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { 2439 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); 2440 return -EINVAL; 2441 } 2442 2443 if (tb[IFLA_GRO_IPV4_MAX_SIZE] && 2444 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { 2445 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); 2446 return -EINVAL; 2447 } 2448 2449 if (tb[IFLA_AF_SPEC]) { 2450 struct nlattr *af; 2451 int rem, err; 2452 2453 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2454 const struct rtnl_af_ops *af_ops; 2455 2456 af_ops = rtnl_af_lookup(nla_type(af)); 2457 if (!af_ops) 2458 return -EAFNOSUPPORT; 2459 2460 if (!af_ops->set_link_af) 2461 return -EOPNOTSUPP; 2462 2463 if (af_ops->validate_link_af) { 2464 err = af_ops->validate_link_af(dev, af, extack); 2465 if (err < 0) 2466 return err; 2467 } 2468 } 2469 } 2470 2471 return 0; 2472 } 2473 2474 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2475 int guid_type) 2476 { 2477 const struct net_device_ops *ops = dev->netdev_ops; 2478 2479 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2480 } 2481 2482 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2483 { 2484 if (dev->type != ARPHRD_INFINIBAND) 2485 return -EOPNOTSUPP; 2486 2487 return handle_infiniband_guid(dev, ivt, guid_type); 2488 } 2489 2490 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2491 { 2492 const struct net_device_ops *ops = dev->netdev_ops; 2493 int err = -EINVAL; 2494 2495 if (tb[IFLA_VF_MAC]) { 2496 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2497 2498 if (ivm->vf >= INT_MAX) 2499 return -EINVAL; 2500 err = -EOPNOTSUPP; 2501 if (ops->ndo_set_vf_mac) 2502 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2503 ivm->mac); 2504 if (err < 0) 2505 return err; 2506 } 2507 2508 if (tb[IFLA_VF_VLAN]) { 2509 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2510 2511 if (ivv->vf >= INT_MAX) 2512 return -EINVAL; 2513 err = -EOPNOTSUPP; 2514 if (ops->ndo_set_vf_vlan) 2515 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2516 ivv->qos, 2517 htons(ETH_P_8021Q)); 2518 if (err < 0) 2519 return err; 2520 } 2521 2522 if (tb[IFLA_VF_VLAN_LIST]) { 2523 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2524 struct nlattr *attr; 2525 int rem, len = 0; 2526 2527 err = -EOPNOTSUPP; 2528 if (!ops->ndo_set_vf_vlan) 2529 return err; 2530 2531 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2532 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2533 nla_len(attr) < NLA_HDRLEN) { 2534 return -EINVAL; 2535 } 2536 if (len >= MAX_VLAN_LIST_LEN) 2537 return -EOPNOTSUPP; 2538 ivvl[len] = nla_data(attr); 2539 2540 len++; 2541 } 2542 if (len == 0) 2543 return -EINVAL; 2544 2545 if (ivvl[0]->vf >= INT_MAX) 2546 return -EINVAL; 2547 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2548 ivvl[0]->qos, ivvl[0]->vlan_proto); 2549 if (err < 0) 2550 return err; 2551 } 2552 2553 if (tb[IFLA_VF_TX_RATE]) { 2554 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2555 struct ifla_vf_info ivf; 2556 2557 if (ivt->vf >= INT_MAX) 2558 return -EINVAL; 2559 err = -EOPNOTSUPP; 2560 if (ops->ndo_get_vf_config) 2561 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2562 if (err < 0) 2563 return err; 2564 2565 err = rtnl_set_vf_rate(dev, ivt->vf, 2566 ivf.min_tx_rate, ivt->rate); 2567 if (err < 0) 
2568 return err; 2569 } 2570 2571 if (tb[IFLA_VF_RATE]) { 2572 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2573 2574 if (ivt->vf >= INT_MAX) 2575 return -EINVAL; 2576 2577 err = rtnl_set_vf_rate(dev, ivt->vf, 2578 ivt->min_tx_rate, ivt->max_tx_rate); 2579 if (err < 0) 2580 return err; 2581 } 2582 2583 if (tb[IFLA_VF_SPOOFCHK]) { 2584 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2585 2586 if (ivs->vf >= INT_MAX) 2587 return -EINVAL; 2588 err = -EOPNOTSUPP; 2589 if (ops->ndo_set_vf_spoofchk) 2590 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2591 ivs->setting); 2592 if (err < 0) 2593 return err; 2594 } 2595 2596 if (tb[IFLA_VF_LINK_STATE]) { 2597 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2598 2599 if (ivl->vf >= INT_MAX) 2600 return -EINVAL; 2601 err = -EOPNOTSUPP; 2602 if (ops->ndo_set_vf_link_state) 2603 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2604 ivl->link_state); 2605 if (err < 0) 2606 return err; 2607 } 2608 2609 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2610 struct ifla_vf_rss_query_en *ivrssq_en; 2611 2612 err = -EOPNOTSUPP; 2613 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2614 if (ivrssq_en->vf >= INT_MAX) 2615 return -EINVAL; 2616 if (ops->ndo_set_vf_rss_query_en) 2617 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2618 ivrssq_en->setting); 2619 if (err < 0) 2620 return err; 2621 } 2622 2623 if (tb[IFLA_VF_TRUST]) { 2624 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2625 2626 if (ivt->vf >= INT_MAX) 2627 return -EINVAL; 2628 err = -EOPNOTSUPP; 2629 if (ops->ndo_set_vf_trust) 2630 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2631 if (err < 0) 2632 return err; 2633 } 2634 2635 if (tb[IFLA_VF_IB_NODE_GUID]) { 2636 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2637 2638 if (ivt->vf >= INT_MAX) 2639 return -EINVAL; 2640 if (!ops->ndo_set_vf_guid) 2641 return -EOPNOTSUPP; 2642 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2643 } 2644 2645 if (tb[IFLA_VF_IB_PORT_GUID]) { 2646 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2647 2648 if (ivt->vf >= INT_MAX) 2649 return -EINVAL; 2650 if (!ops->ndo_set_vf_guid) 2651 return -EOPNOTSUPP; 2652 2653 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2654 } 2655 2656 return err; 2657 } 2658 2659 static int do_set_master(struct net_device *dev, int ifindex, 2660 struct netlink_ext_ack *extack) 2661 { 2662 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2663 const struct net_device_ops *ops; 2664 int err; 2665 2666 if (upper_dev) { 2667 if (upper_dev->ifindex == ifindex) 2668 return 0; 2669 ops = upper_dev->netdev_ops; 2670 if (ops->ndo_del_slave) { 2671 err = ops->ndo_del_slave(upper_dev, dev); 2672 if (err) 2673 return err; 2674 } else { 2675 return -EOPNOTSUPP; 2676 } 2677 } 2678 2679 if (ifindex) { 2680 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2681 if (!upper_dev) 2682 return -EINVAL; 2683 ops = upper_dev->netdev_ops; 2684 if (ops->ndo_add_slave) { 2685 err = ops->ndo_add_slave(upper_dev, dev, extack); 2686 if (err) 2687 return err; 2688 } else { 2689 return -EOPNOTSUPP; 2690 } 2691 } 2692 return 0; 2693 } 2694 2695 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2696 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2697 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2698 }; 2699 2700 static int do_set_proto_down(struct net_device *dev, 2701 struct nlattr *nl_proto_down, 2702 struct nlattr *nl_proto_down_reason, 2703 struct 
netlink_ext_ack *extack) 2704 { 2705 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2706 unsigned long mask = 0; 2707 u32 value; 2708 bool proto_down; 2709 int err; 2710 2711 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2712 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2713 return -EOPNOTSUPP; 2714 } 2715 2716 if (nl_proto_down_reason) { 2717 err = nla_parse_nested_deprecated(pdreason, 2718 IFLA_PROTO_DOWN_REASON_MAX, 2719 nl_proto_down_reason, 2720 ifla_proto_down_reason_policy, 2721 NULL); 2722 if (err < 0) 2723 return err; 2724 2725 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2726 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2727 return -EINVAL; 2728 } 2729 2730 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2731 2732 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2733 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2734 2735 dev_change_proto_down_reason(dev, mask, value); 2736 } 2737 2738 if (nl_proto_down) { 2739 proto_down = nla_get_u8(nl_proto_down); 2740 2741 /* Don't turn off protodown if there are active reasons */ 2742 if (!proto_down && dev->proto_down_reason) { 2743 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2744 return -EBUSY; 2745 } 2746 err = dev_change_proto_down(dev, 2747 proto_down); 2748 if (err) 2749 return err; 2750 } 2751 2752 return 0; 2753 } 2754 2755 #define DO_SETLINK_MODIFIED 0x01 2756 /* notify flag means notify + modified. */ 2757 #define DO_SETLINK_NOTIFY 0x03 2758 static int do_setlink(const struct sk_buff *skb, 2759 struct net_device *dev, struct ifinfomsg *ifm, 2760 struct netlink_ext_ack *extack, 2761 struct nlattr **tb, int status) 2762 { 2763 const struct net_device_ops *ops = dev->netdev_ops; 2764 char ifname[IFNAMSIZ]; 2765 int err; 2766 2767 if (tb[IFLA_IFNAME]) 2768 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2769 else 2770 ifname[0] = '\0'; 2771 2772 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2773 const char *pat = ifname[0] ? 
ifname : NULL; 2774 struct net *net; 2775 int new_ifindex; 2776 2777 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2778 tb, CAP_NET_ADMIN); 2779 if (IS_ERR(net)) { 2780 err = PTR_ERR(net); 2781 goto errout; 2782 } 2783 2784 if (tb[IFLA_NEW_IFINDEX]) 2785 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2786 else 2787 new_ifindex = 0; 2788 2789 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2790 put_net(net); 2791 if (err) 2792 goto errout; 2793 status |= DO_SETLINK_MODIFIED; 2794 } 2795 2796 if (tb[IFLA_MAP]) { 2797 struct rtnl_link_ifmap *u_map; 2798 struct ifmap k_map; 2799 2800 if (!ops->ndo_set_config) { 2801 err = -EOPNOTSUPP; 2802 goto errout; 2803 } 2804 2805 if (!netif_device_present(dev)) { 2806 err = -ENODEV; 2807 goto errout; 2808 } 2809 2810 u_map = nla_data(tb[IFLA_MAP]); 2811 k_map.mem_start = (unsigned long) u_map->mem_start; 2812 k_map.mem_end = (unsigned long) u_map->mem_end; 2813 k_map.base_addr = (unsigned short) u_map->base_addr; 2814 k_map.irq = (unsigned char) u_map->irq; 2815 k_map.dma = (unsigned char) u_map->dma; 2816 k_map.port = (unsigned char) u_map->port; 2817 2818 err = ops->ndo_set_config(dev, &k_map); 2819 if (err < 0) 2820 goto errout; 2821 2822 status |= DO_SETLINK_NOTIFY; 2823 } 2824 2825 if (tb[IFLA_ADDRESS]) { 2826 struct sockaddr *sa; 2827 int len; 2828 2829 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2830 sizeof(*sa)); 2831 sa = kmalloc(len, GFP_KERNEL); 2832 if (!sa) { 2833 err = -ENOMEM; 2834 goto errout; 2835 } 2836 sa->sa_family = dev->type; 2837 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2838 dev->addr_len); 2839 err = dev_set_mac_address_user(dev, sa, extack); 2840 kfree(sa); 2841 if (err) 2842 goto errout; 2843 status |= DO_SETLINK_MODIFIED; 2844 } 2845 2846 if (tb[IFLA_MTU]) { 2847 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2848 if (err < 0) 2849 goto errout; 2850 status |= DO_SETLINK_MODIFIED; 2851 } 2852 2853 if (tb[IFLA_GROUP]) { 2854 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2855 status |= DO_SETLINK_NOTIFY; 2856 } 2857 2858 /* 2859 * Interface selected by interface index but interface 2860 * name provided implies that a name change has been 2861 * requested. 
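* For example, a request carrying ifi_index == 2 together with IFLA_IFNAME = "lan0" renames device 2 to "lan0" via dev_change_name(); a device selected by name instead of index is left untouched here.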
2862 */ 2863 if (ifm->ifi_index > 0 && ifname[0]) { 2864 err = dev_change_name(dev, ifname); 2865 if (err < 0) 2866 goto errout; 2867 status |= DO_SETLINK_MODIFIED; 2868 } 2869 2870 if (tb[IFLA_IFALIAS]) { 2871 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2872 nla_len(tb[IFLA_IFALIAS])); 2873 if (err < 0) 2874 goto errout; 2875 status |= DO_SETLINK_NOTIFY; 2876 } 2877 2878 if (tb[IFLA_BROADCAST]) { 2879 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2880 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2881 } 2882 2883 if (ifm->ifi_flags || ifm->ifi_change) { 2884 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2885 extack); 2886 if (err < 0) 2887 goto errout; 2888 } 2889 2890 if (tb[IFLA_MASTER]) { 2891 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2892 if (err) 2893 goto errout; 2894 status |= DO_SETLINK_MODIFIED; 2895 } 2896 2897 if (tb[IFLA_CARRIER]) { 2898 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2899 if (err) 2900 goto errout; 2901 status |= DO_SETLINK_MODIFIED; 2902 } 2903 2904 if (tb[IFLA_TXQLEN]) { 2905 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2906 2907 err = dev_change_tx_queue_len(dev, value); 2908 if (err) 2909 goto errout; 2910 status |= DO_SETLINK_MODIFIED; 2911 } 2912 2913 if (tb[IFLA_GSO_MAX_SIZE]) { 2914 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2915 2916 if (dev->gso_max_size ^ max_size) { 2917 netif_set_gso_max_size(dev, max_size); 2918 status |= DO_SETLINK_MODIFIED; 2919 } 2920 } 2921 2922 if (tb[IFLA_GSO_MAX_SEGS]) { 2923 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2924 2925 if (dev->gso_max_segs ^ max_segs) { 2926 netif_set_gso_max_segs(dev, max_segs); 2927 status |= DO_SETLINK_MODIFIED; 2928 } 2929 } 2930 2931 if (tb[IFLA_GRO_MAX_SIZE]) { 2932 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2933 2934 if (dev->gro_max_size ^ gro_max_size) { 2935 netif_set_gro_max_size(dev, gro_max_size); 2936 status |= DO_SETLINK_MODIFIED; 2937 } 2938 } 2939 2940 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 2941 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 2942 2943 if (dev->gso_ipv4_max_size ^ max_size) { 2944 netif_set_gso_ipv4_max_size(dev, max_size); 2945 status |= DO_SETLINK_MODIFIED; 2946 } 2947 } 2948 2949 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 2950 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 2951 2952 if (dev->gro_ipv4_max_size ^ gro_max_size) { 2953 netif_set_gro_ipv4_max_size(dev, gro_max_size); 2954 status |= DO_SETLINK_MODIFIED; 2955 } 2956 } 2957 2958 if (tb[IFLA_OPERSTATE]) 2959 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2960 2961 if (tb[IFLA_LINKMODE]) { 2962 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2963 2964 if (dev->link_mode ^ value) 2965 status |= DO_SETLINK_NOTIFY; 2966 WRITE_ONCE(dev->link_mode, value); 2967 } 2968 2969 if (tb[IFLA_VFINFO_LIST]) { 2970 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2971 struct nlattr *attr; 2972 int rem; 2973 2974 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2975 if (nla_type(attr) != IFLA_VF_INFO || 2976 nla_len(attr) < NLA_HDRLEN) { 2977 err = -EINVAL; 2978 goto errout; 2979 } 2980 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2981 attr, 2982 ifla_vf_policy, 2983 NULL); 2984 if (err < 0) 2985 goto errout; 2986 err = do_setvfinfo(dev, vfinfo); 2987 if (err < 0) 2988 goto errout; 2989 status |= DO_SETLINK_NOTIFY; 2990 } 2991 } 2992 err = 0; 2993 2994 if (tb[IFLA_VF_PORTS]) { 2995 struct nlattr *port[IFLA_PORT_MAX+1]; 2996 struct nlattr *attr; 2997 int vf; 2998 int rem; 2999 3000 
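/* VF port profiles (e.g. 802.1Qbg VSI setup): each IFLA_VF_PORT nest must carry IFLA_PORT_VF to identify the target VF, and the driver must implement ndo_set_vf_port. */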
err = -EOPNOTSUPP; 3001 if (!ops->ndo_set_vf_port) 3002 goto errout; 3003 3004 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 3005 if (nla_type(attr) != IFLA_VF_PORT || 3006 nla_len(attr) < NLA_HDRLEN) { 3007 err = -EINVAL; 3008 goto errout; 3009 } 3010 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3011 attr, 3012 ifla_port_policy, 3013 NULL); 3014 if (err < 0) 3015 goto errout; 3016 if (!port[IFLA_PORT_VF]) { 3017 err = -EOPNOTSUPP; 3018 goto errout; 3019 } 3020 vf = nla_get_u32(port[IFLA_PORT_VF]); 3021 err = ops->ndo_set_vf_port(dev, vf, port); 3022 if (err < 0) 3023 goto errout; 3024 status |= DO_SETLINK_NOTIFY; 3025 } 3026 } 3027 err = 0; 3028 3029 if (tb[IFLA_PORT_SELF]) { 3030 struct nlattr *port[IFLA_PORT_MAX+1]; 3031 3032 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3033 tb[IFLA_PORT_SELF], 3034 ifla_port_policy, NULL); 3035 if (err < 0) 3036 goto errout; 3037 3038 err = -EOPNOTSUPP; 3039 if (ops->ndo_set_vf_port) 3040 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3041 if (err < 0) 3042 goto errout; 3043 status |= DO_SETLINK_NOTIFY; 3044 } 3045 3046 if (tb[IFLA_AF_SPEC]) { 3047 struct nlattr *af; 3048 int rem; 3049 3050 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3051 const struct rtnl_af_ops *af_ops; 3052 3053 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3054 3055 err = af_ops->set_link_af(dev, af, extack); 3056 if (err < 0) 3057 goto errout; 3058 3059 status |= DO_SETLINK_NOTIFY; 3060 } 3061 } 3062 err = 0; 3063 3064 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3065 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3066 tb[IFLA_PROTO_DOWN_REASON], extack); 3067 if (err) 3068 goto errout; 3069 status |= DO_SETLINK_NOTIFY; 3070 } 3071 3072 if (tb[IFLA_XDP]) { 3073 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3074 u32 xdp_flags = 0; 3075 3076 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3077 tb[IFLA_XDP], 3078 ifla_xdp_policy, NULL); 3079 if (err < 0) 3080 goto errout; 3081 3082 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3083 err = -EINVAL; 3084 goto errout; 3085 } 3086 3087 if (xdp[IFLA_XDP_FLAGS]) { 3088 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3089 if (xdp_flags & ~XDP_FLAGS_MASK) { 3090 err = -EINVAL; 3091 goto errout; 3092 } 3093 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3094 err = -EINVAL; 3095 goto errout; 3096 } 3097 } 3098 3099 if (xdp[IFLA_XDP_FD]) { 3100 int expected_fd = -1; 3101 3102 if (xdp_flags & XDP_FLAGS_REPLACE) { 3103 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3104 err = -EINVAL; 3105 goto errout; 3106 } 3107 expected_fd = 3108 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3109 } 3110 3111 err = dev_change_xdp_fd(dev, extack, 3112 nla_get_s32(xdp[IFLA_XDP_FD]), 3113 expected_fd, 3114 xdp_flags); 3115 if (err) 3116 goto errout; 3117 status |= DO_SETLINK_NOTIFY; 3118 } 3119 } 3120 3121 errout: 3122 if (status & DO_SETLINK_MODIFIED) { 3123 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3124 netdev_state_change(dev); 3125 3126 if (err < 0) 3127 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3128 dev->name); 3129 } 3130 3131 return err; 3132 } 3133 3134 static struct net_device *rtnl_dev_get(struct net *net, 3135 struct nlattr *tb[]) 3136 { 3137 char ifname[ALTIFNAMSIZ]; 3138 3139 if (tb[IFLA_IFNAME]) 3140 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3141 else if (tb[IFLA_ALT_IFNAME]) 3142 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3143 else 3144 return NULL; 3145 3146 return __dev_get_by_name(net, ifname); 3147 } 3148 3149 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3150 struct netlink_ext_ack *extack) 3151 { 3152 struct net *net = sock_net(skb->sk); 3153 struct ifinfomsg *ifm; 3154 struct net_device *dev; 3155 int err; 3156 struct nlattr *tb[IFLA_MAX+1]; 3157 3158 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3159 ifla_policy, extack); 3160 if (err < 0) 3161 goto errout; 3162 3163 err = rtnl_ensure_unique_netns(tb, extack, false); 3164 if (err < 0) 3165 goto errout; 3166 3167 err = -EINVAL; 3168 ifm = nlmsg_data(nlh); 3169 if (ifm->ifi_index > 0) 3170 dev = __dev_get_by_index(net, ifm->ifi_index); 3171 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3172 dev = rtnl_dev_get(net, tb); 3173 else 3174 goto errout; 3175 3176 if (dev == NULL) { 3177 err = -ENODEV; 3178 goto errout; 3179 } 3180 3181 err = validate_linkmsg(dev, tb, extack); 3182 if (err < 0) 3183 goto errout; 3184 3185 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3186 errout: 3187 return err; 3188 } 3189 3190 static int rtnl_group_dellink(const struct net *net, int group) 3191 { 3192 struct net_device *dev, *aux; 3193 LIST_HEAD(list_kill); 3194 bool found = false; 3195 3196 if (!group) 3197 return -EPERM; 3198 3199 for_each_netdev(net, dev) { 3200 if (dev->group == group) { 3201 const struct rtnl_link_ops *ops; 3202 3203 found = true; 3204 ops = dev->rtnl_link_ops; 3205 if (!ops || !ops->dellink) 3206 return -EOPNOTSUPP; 3207 } 3208 } 3209 3210 if (!found) 3211 return -ENODEV; 3212 3213 for_each_netdev_safe(net, dev, aux) { 3214 if (dev->group == group) { 3215 const struct rtnl_link_ops *ops; 3216 3217 ops = dev->rtnl_link_ops; 3218 ops->dellink(dev, &list_kill); 3219 } 3220 } 3221 unregister_netdevice_many(&list_kill); 3222 3223 return 0; 3224 } 3225 3226 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3227 { 3228 const struct rtnl_link_ops *ops; 3229 LIST_HEAD(list_kill); 3230 3231 ops = dev->rtnl_link_ops; 3232 if (!ops || !ops->dellink) 3233 return -EOPNOTSUPP; 3234 3235 ops->dellink(dev, &list_kill); 3236 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3237 3238 return 0; 3239 } 3240 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3241 3242 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3243 struct netlink_ext_ack *extack) 3244 { 3245 struct net *net = sock_net(skb->sk); 3246 u32 portid = NETLINK_CB(skb).portid; 3247 struct net *tgt_net = net; 3248 struct net_device *dev = NULL; 3249 struct ifinfomsg *ifm; 3250 struct nlattr *tb[IFLA_MAX+1]; 3251 int err; 3252 int netnsid = -1; 3253 3254 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3255 ifla_policy, extack); 3256 if (err < 0) 3257 return err; 3258 3259 err = rtnl_ensure_unique_netns(tb, extack, true); 3260 if (err < 0) 3261 return err; 3262 3263 if (tb[IFLA_TARGET_NETNSID]) { 3264 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3265 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3266 if (IS_ERR(tgt_net)) 3267 return 
PTR_ERR(tgt_net); 3268 } 3269 3270 err = -EINVAL; 3271 ifm = nlmsg_data(nlh); 3272 if (ifm->ifi_index > 0) 3273 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3274 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3275 dev = rtnl_dev_get(net, tb); 3276 else if (tb[IFLA_GROUP]) 3277 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3278 else 3279 goto out; 3280 3281 if (!dev) { 3282 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3283 err = -ENODEV; 3284 3285 goto out; 3286 } 3287 3288 err = rtnl_delete_link(dev, portid, nlh); 3289 3290 out: 3291 if (netnsid >= 0) 3292 put_net(tgt_net); 3293 3294 return err; 3295 } 3296 3297 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3298 u32 portid, const struct nlmsghdr *nlh) 3299 { 3300 unsigned int old_flags; 3301 int err; 3302 3303 old_flags = dev->flags; 3304 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3305 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3306 NULL); 3307 if (err < 0) 3308 return err; 3309 } 3310 3311 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3312 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3313 } else { 3314 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3315 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3316 } 3317 return 0; 3318 } 3319 EXPORT_SYMBOL(rtnl_configure_link); 3320 3321 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3322 unsigned char name_assign_type, 3323 const struct rtnl_link_ops *ops, 3324 struct nlattr *tb[], 3325 struct netlink_ext_ack *extack) 3326 { 3327 struct net_device *dev; 3328 unsigned int num_tx_queues = 1; 3329 unsigned int num_rx_queues = 1; 3330 int err; 3331 3332 if (tb[IFLA_NUM_TX_QUEUES]) 3333 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3334 else if (ops->get_num_tx_queues) 3335 num_tx_queues = ops->get_num_tx_queues(); 3336 3337 if (tb[IFLA_NUM_RX_QUEUES]) 3338 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3339 else if (ops->get_num_rx_queues) 3340 num_rx_queues = ops->get_num_rx_queues(); 3341 3342 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3343 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3344 return ERR_PTR(-EINVAL); 3345 } 3346 3347 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3348 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3349 return ERR_PTR(-EINVAL); 3350 } 3351 3352 if (ops->alloc) { 3353 dev = ops->alloc(tb, ifname, name_assign_type, 3354 num_tx_queues, num_rx_queues); 3355 if (IS_ERR(dev)) 3356 return dev; 3357 } else { 3358 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3359 name_assign_type, ops->setup, 3360 num_tx_queues, num_rx_queues); 3361 } 3362 3363 if (!dev) 3364 return ERR_PTR(-ENOMEM); 3365 3366 err = validate_linkmsg(dev, tb, extack); 3367 if (err < 0) { 3368 free_netdev(dev); 3369 return ERR_PTR(err); 3370 } 3371 3372 dev_net_set(dev, net); 3373 dev->rtnl_link_ops = ops; 3374 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3375 3376 if (tb[IFLA_MTU]) { 3377 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3378 3379 err = dev_validate_mtu(dev, mtu, extack); 3380 if (err) { 3381 free_netdev(dev); 3382 return ERR_PTR(err); 3383 } 3384 dev->mtu = mtu; 3385 } 3386 if (tb[IFLA_ADDRESS]) { 3387 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3388 nla_len(tb[IFLA_ADDRESS])); 3389 dev->addr_assign_type = NET_ADDR_SET; 3390 } 3391 if (tb[IFLA_BROADCAST]) 3392 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3393 nla_len(tb[IFLA_BROADCAST])); 3394 if (tb[IFLA_TXQLEN]) 3395 
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3396 if (tb[IFLA_OPERSTATE]) 3397 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3398 if (tb[IFLA_LINKMODE]) 3399 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3400 if (tb[IFLA_GROUP]) 3401 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3402 if (tb[IFLA_GSO_MAX_SIZE]) 3403 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3404 if (tb[IFLA_GSO_MAX_SEGS]) 3405 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3406 if (tb[IFLA_GRO_MAX_SIZE]) 3407 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3408 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3409 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3410 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3411 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3412 3413 return dev; 3414 } 3415 EXPORT_SYMBOL(rtnl_create_link); 3416 3417 static int rtnl_group_changelink(const struct sk_buff *skb, 3418 struct net *net, int group, 3419 struct ifinfomsg *ifm, 3420 struct netlink_ext_ack *extack, 3421 struct nlattr **tb) 3422 { 3423 struct net_device *dev, *aux; 3424 int err; 3425 3426 for_each_netdev_safe(net, dev, aux) { 3427 if (dev->group == group) { 3428 err = validate_linkmsg(dev, tb, extack); 3429 if (err < 0) 3430 return err; 3431 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3432 if (err < 0) 3433 return err; 3434 } 3435 } 3436 3437 return 0; 3438 } 3439 3440 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3441 const struct rtnl_link_ops *ops, 3442 const struct nlmsghdr *nlh, 3443 struct nlattr **tb, struct nlattr **data, 3444 struct netlink_ext_ack *extack) 3445 { 3446 unsigned char name_assign_type = NET_NAME_USER; 3447 struct net *net = sock_net(skb->sk); 3448 u32 portid = NETLINK_CB(skb).portid; 3449 struct net *dest_net, *link_net; 3450 struct net_device *dev; 3451 char ifname[IFNAMSIZ]; 3452 int err; 3453 3454 if (!ops->alloc && !ops->setup) 3455 return -EOPNOTSUPP; 3456 3457 if (tb[IFLA_IFNAME]) { 3458 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3459 } else { 3460 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3461 name_assign_type = NET_NAME_ENUM; 3462 } 3463 3464 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3465 if (IS_ERR(dest_net)) 3466 return PTR_ERR(dest_net); 3467 3468 if (tb[IFLA_LINK_NETNSID]) { 3469 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3470 3471 link_net = get_net_ns_by_id(dest_net, id); 3472 if (!link_net) { 3473 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3474 err = -EINVAL; 3475 goto out; 3476 } 3477 err = -EPERM; 3478 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3479 goto out; 3480 } else { 3481 link_net = NULL; 3482 } 3483 3484 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3485 name_assign_type, ops, tb, extack); 3486 if (IS_ERR(dev)) { 3487 err = PTR_ERR(dev); 3488 goto out; 3489 } 3490 3491 dev->ifindex = ifm->ifi_index; 3492 3493 if (ops->newlink) 3494 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3495 else 3496 err = register_netdevice(dev); 3497 if (err < 0) { 3498 free_netdev(dev); 3499 goto out; 3500 } 3501 3502 err = rtnl_configure_link(dev, ifm, portid, nlh); 3503 if (err < 0) 3504 goto out_unregister; 3505 if (link_net) { 3506 err = dev_change_net_namespace(dev, dest_net, ifname); 3507 if (err < 0) 3508 goto out_unregister; 3509 } 3510 if (tb[IFLA_MASTER]) { 3511 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3512 if (err) 3513 goto out_unregister; 3514 } 3515 out: 3516 if (link_net) 3517 put_net(link_net); 3518 put_net(dest_net); 3519 return err; 3520 out_unregister: 3521 if (ops->newlink) { 3522 LIST_HEAD(list_kill); 3523 3524 ops->dellink(dev, &list_kill); 3525 unregister_netdevice_many(&list_kill); 3526 } else { 3527 unregister_netdevice(dev); 3528 } 3529 goto out; 3530 } 3531 3532 struct rtnl_newlink_tbs { 3533 struct nlattr *tb[IFLA_MAX + 1]; 3534 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3535 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3536 }; 3537 3538 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3539 struct rtnl_newlink_tbs *tbs, 3540 struct netlink_ext_ack *extack) 3541 { 3542 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3543 struct nlattr ** const tb = tbs->tb; 3544 const struct rtnl_link_ops *m_ops; 3545 struct net_device *master_dev; 3546 struct net *net = sock_net(skb->sk); 3547 const struct rtnl_link_ops *ops; 3548 struct nlattr **slave_data; 3549 char kind[MODULE_NAME_LEN]; 3550 struct net_device *dev; 3551 struct ifinfomsg *ifm; 3552 struct nlattr **data; 3553 bool link_specified; 3554 int err; 3555 3556 #ifdef CONFIG_MODULES 3557 replay: 3558 #endif 3559 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3560 ifla_policy, extack); 3561 if (err < 0) 3562 return err; 3563 3564 err = rtnl_ensure_unique_netns(tb, extack, false); 3565 if (err < 0) 3566 return err; 3567 3568 ifm = nlmsg_data(nlh); 3569 if (ifm->ifi_index > 0) { 3570 link_specified = true; 3571 dev = __dev_get_by_index(net, ifm->ifi_index); 3572 } else if (ifm->ifi_index < 0) { 3573 NL_SET_ERR_MSG(extack, "ifindex can't be negative"); 3574 return -EINVAL; 3575 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3576 link_specified = true; 3577 dev = rtnl_dev_get(net, tb); 3578 } else { 3579 link_specified = false; 3580 dev = NULL; 3581 } 3582 3583 master_dev = NULL; 3584 m_ops = NULL; 3585 if (dev) { 3586 master_dev = netdev_master_upper_dev_get(dev); 3587 if (master_dev) 3588 m_ops = master_dev->rtnl_link_ops; 3589 } 3590 3591 if (tb[IFLA_LINKINFO]) { 3592 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3593 tb[IFLA_LINKINFO], 3594 ifla_info_policy, NULL); 3595 if (err < 0) 3596 return err; 3597 } else 3598 memset(linkinfo, 0, sizeof(linkinfo)); 3599 3600 if (linkinfo[IFLA_INFO_KIND]) { 3601 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3602 ops = rtnl_link_ops_get(kind); 3603 } else { 3604 kind[0] = '\0'; 3605 ops = NULL; 3606 } 3607 3608 data = NULL; 3609 if (ops) { 3610 if (ops->maxtype > RTNL_MAX_TYPE) 3611 return -EINVAL; 3612 3613 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3614 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3615 linkinfo[IFLA_INFO_DATA], 3616 ops->policy, extack); 3617 if (err < 0) 3618 return err; 3619 data = tbs->attr; 3620 } 3621 if (ops->validate) { 3622 err = ops->validate(tb, data, extack); 3623 if (err < 0) 3624 return err; 3625 } 3626 } 3627 3628 slave_data = NULL; 3629 if (m_ops) { 3630 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3631 
return -EINVAL; 3632 3633 if (m_ops->slave_maxtype && 3634 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3635 err = nla_parse_nested_deprecated(tbs->slave_attr, 3636 m_ops->slave_maxtype, 3637 linkinfo[IFLA_INFO_SLAVE_DATA], 3638 m_ops->slave_policy, 3639 extack); 3640 if (err < 0) 3641 return err; 3642 slave_data = tbs->slave_attr; 3643 } 3644 } 3645 3646 if (dev) { 3647 int status = 0; 3648 3649 if (nlh->nlmsg_flags & NLM_F_EXCL) 3650 return -EEXIST; 3651 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3652 return -EOPNOTSUPP; 3653 3654 err = validate_linkmsg(dev, tb, extack); 3655 if (err < 0) 3656 return err; 3657 3658 if (linkinfo[IFLA_INFO_DATA]) { 3659 if (!ops || ops != dev->rtnl_link_ops || 3660 !ops->changelink) 3661 return -EOPNOTSUPP; 3662 3663 err = ops->changelink(dev, tb, data, extack); 3664 if (err < 0) 3665 return err; 3666 status |= DO_SETLINK_NOTIFY; 3667 } 3668 3669 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3670 if (!m_ops || !m_ops->slave_changelink) 3671 return -EOPNOTSUPP; 3672 3673 err = m_ops->slave_changelink(master_dev, dev, tb, 3674 slave_data, extack); 3675 if (err < 0) 3676 return err; 3677 status |= DO_SETLINK_NOTIFY; 3678 } 3679 3680 return do_setlink(skb, dev, ifm, extack, tb, status); 3681 } 3682 3683 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3684 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3685 * or it's for a group 3686 */ 3687 if (link_specified) 3688 return -ENODEV; 3689 if (tb[IFLA_GROUP]) 3690 return rtnl_group_changelink(skb, net, 3691 nla_get_u32(tb[IFLA_GROUP]), 3692 ifm, extack, tb); 3693 return -ENODEV; 3694 } 3695 3696 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3697 return -EOPNOTSUPP; 3698 3699 if (!ops) { 3700 #ifdef CONFIG_MODULES 3701 if (kind[0]) { 3702 __rtnl_unlock(); 3703 request_module("rtnl-link-%s", kind); 3704 rtnl_lock(); 3705 ops = rtnl_link_ops_get(kind); 3706 if (ops) 3707 goto replay; 3708 } 3709 #endif 3710 NL_SET_ERR_MSG(extack, "Unknown device type"); 3711 return -EOPNOTSUPP; 3712 } 3713 3714 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3715 } 3716 3717 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3718 struct netlink_ext_ack *extack) 3719 { 3720 struct rtnl_newlink_tbs *tbs; 3721 int ret; 3722 3723 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3724 if (!tbs) 3725 return -ENOMEM; 3726 3727 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3728 kfree(tbs); 3729 return ret; 3730 } 3731 3732 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3733 const struct nlmsghdr *nlh, 3734 struct nlattr **tb, 3735 struct netlink_ext_ack *extack) 3736 { 3737 struct ifinfomsg *ifm; 3738 int i, err; 3739 3740 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3741 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3742 return -EINVAL; 3743 } 3744 3745 if (!netlink_strict_get_check(skb)) 3746 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3747 ifla_policy, extack); 3748 3749 ifm = nlmsg_data(nlh); 3750 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3751 ifm->ifi_change) { 3752 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3753 return -EINVAL; 3754 } 3755 3756 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3757 ifla_policy, extack); 3758 if (err) 3759 return err; 3760 3761 for (i = 0; i <= IFLA_MAX; i++) { 3762 if (!tb[i]) 3763 continue; 3764 3765 switch (i) { 3766 case IFLA_IFNAME: 3767 case IFLA_ALT_IFNAME: 3768 case IFLA_EXT_MASK: 3769 case IFLA_TARGET_NETNSID: 3770 break; 3771 default: 3772 NL_SET_ERR_MSG(extack, "Unsupported 
attribute in get link request"); 3773 return -EINVAL; 3774 } 3775 } 3776 3777 return 0; 3778 } 3779 3780 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3781 struct netlink_ext_ack *extack) 3782 { 3783 struct net *net = sock_net(skb->sk); 3784 struct net *tgt_net = net; 3785 struct ifinfomsg *ifm; 3786 struct nlattr *tb[IFLA_MAX+1]; 3787 struct net_device *dev = NULL; 3788 struct sk_buff *nskb; 3789 int netnsid = -1; 3790 int err; 3791 u32 ext_filter_mask = 0; 3792 3793 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3794 if (err < 0) 3795 return err; 3796 3797 err = rtnl_ensure_unique_netns(tb, extack, true); 3798 if (err < 0) 3799 return err; 3800 3801 if (tb[IFLA_TARGET_NETNSID]) { 3802 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3803 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3804 if (IS_ERR(tgt_net)) 3805 return PTR_ERR(tgt_net); 3806 } 3807 3808 if (tb[IFLA_EXT_MASK]) 3809 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3810 3811 err = -EINVAL; 3812 ifm = nlmsg_data(nlh); 3813 if (ifm->ifi_index > 0) 3814 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3815 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3816 dev = rtnl_dev_get(tgt_net, tb); 3817 else 3818 goto out; 3819 3820 err = -ENODEV; 3821 if (dev == NULL) 3822 goto out; 3823 3824 err = -ENOBUFS; 3825 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask)); 3826 if (nskb == NULL) 3827 goto out; 3828 3829 /* Synchronize the carrier state so we don't report a state 3830 * that we're not actually going to honour immediately; if 3831 * the driver just did a carrier off->on transition, we can 3832 * only TX if link watch work has run, but without this we'd 3833 * already report carrier on, even if it doesn't work yet. 3834 */ 3835 linkwatch_sync_dev(dev); 3836 3837 err = rtnl_fill_ifinfo(nskb, dev, net, 3838 RTM_NEWLINK, NETLINK_CB(skb).portid, 3839 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3840 0, NULL, 0, netnsid, GFP_KERNEL); 3841 if (err < 0) { 3842 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3843 WARN_ON(err == -EMSGSIZE); 3844 kfree_skb(nskb); 3845 } else 3846 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3847 out: 3848 if (netnsid >= 0) 3849 put_net(tgt_net); 3850 3851 return err; 3852 } 3853 3854 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3855 bool *changed, struct netlink_ext_ack *extack) 3856 { 3857 char *alt_ifname; 3858 size_t size; 3859 int err; 3860 3861 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3862 if (err) 3863 return err; 3864 3865 if (cmd == RTM_NEWLINKPROP) { 3866 size = rtnl_prop_list_size(dev); 3867 size += nla_total_size(ALTIFNAMSIZ); 3868 if (size >= U16_MAX) { 3869 NL_SET_ERR_MSG(extack, 3870 "effective property list too long"); 3871 return -EINVAL; 3872 } 3873 } 3874 3875 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3876 if (!alt_ifname) 3877 return -ENOMEM; 3878 3879 if (cmd == RTM_NEWLINKPROP) { 3880 err = netdev_name_node_alt_create(dev, alt_ifname); 3881 if (!err) 3882 alt_ifname = NULL; 3883 } else if (cmd == RTM_DELLINKPROP) { 3884 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3885 } else { 3886 WARN_ON_ONCE(1); 3887 err = -EINVAL; 3888 } 3889 3890 kfree(alt_ifname); 3891 if (!err) 3892 *changed = true; 3893 return err; 3894 } 3895 3896 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3897 struct netlink_ext_ack *extack) 3898 { 3899 struct net *net = sock_net(skb->sk); 3900 struct nlattr *tb[IFLA_MAX + 1]; 3901 struct net_device 
*dev; 3902 struct ifinfomsg *ifm; 3903 bool changed = false; 3904 struct nlattr *attr; 3905 int err, rem; 3906 3907 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3908 if (err) 3909 return err; 3910 3911 err = rtnl_ensure_unique_netns(tb, extack, true); 3912 if (err) 3913 return err; 3914 3915 ifm = nlmsg_data(nlh); 3916 if (ifm->ifi_index > 0) 3917 dev = __dev_get_by_index(net, ifm->ifi_index); 3918 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3919 dev = rtnl_dev_get(net, tb); 3920 else 3921 return -EINVAL; 3922 3923 if (!dev) 3924 return -ENODEV; 3925 3926 if (!tb[IFLA_PROP_LIST]) 3927 return 0; 3928 3929 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3930 switch (nla_type(attr)) { 3931 case IFLA_ALT_IFNAME: 3932 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3933 if (err) 3934 return err; 3935 break; 3936 } 3937 } 3938 3939 if (changed) 3940 netdev_state_change(dev); 3941 return 0; 3942 } 3943 3944 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3945 struct netlink_ext_ack *extack) 3946 { 3947 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3948 } 3949 3950 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3951 struct netlink_ext_ack *extack) 3952 { 3953 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3954 } 3955 3956 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3957 { 3958 struct net *net = sock_net(skb->sk); 3959 size_t min_ifinfo_dump_size = 0; 3960 struct nlattr *tb[IFLA_MAX+1]; 3961 u32 ext_filter_mask = 0; 3962 struct net_device *dev; 3963 int hdrlen; 3964 3965 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3966 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3967 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3968 3969 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3970 if (tb[IFLA_EXT_MASK]) 3971 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3972 } 3973 3974 if (!ext_filter_mask) 3975 return NLMSG_GOODSIZE; 3976 /* 3977 * traverse the list of net devices and compute the minimum 3978 * buffer size based upon the filter mask. 3979 */ 3980 rcu_read_lock(); 3981 for_each_netdev_rcu(net, dev) { 3982 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3983 if_nlmsg_size(dev, ext_filter_mask)); 3984 } 3985 rcu_read_unlock(); 3986 3987 return nlmsg_total_size(min_ifinfo_dump_size); 3988 } 3989 3990 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3991 { 3992 int idx; 3993 int s_idx = cb->family; 3994 int type = cb->nlh->nlmsg_type - RTM_BASE; 3995 int ret = 0; 3996 3997 if (s_idx == 0) 3998 s_idx = 1; 3999 4000 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 4001 struct rtnl_link __rcu **tab; 4002 struct rtnl_link *link; 4003 rtnl_dumpit_func dumpit; 4004 4005 if (idx < s_idx || idx == PF_PACKET) 4006 continue; 4007 4008 if (type < 0 || type >= RTM_NR_MSGTYPES) 4009 continue; 4010 4011 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 4012 if (!tab) 4013 continue; 4014 4015 link = rcu_dereference_rtnl(tab[type]); 4016 if (!link) 4017 continue; 4018 4019 dumpit = link->dumpit; 4020 if (!dumpit) 4021 continue; 4022 4023 if (idx > s_idx) { 4024 memset(&cb->args[0], 0, sizeof(cb->args)); 4025 cb->prev_seq = 0; 4026 cb->seq = 0; 4027 } 4028 ret = dumpit(skb, cb); 4029 if (ret) 4030 break; 4031 } 4032 cb->family = idx; 4033 4034 return skb->len ? 
: ret; 4035 } 4036 4037 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4038 unsigned int change, 4039 u32 event, gfp_t flags, int *new_nsid, 4040 int new_ifindex, u32 portid, 4041 const struct nlmsghdr *nlh) 4042 { 4043 struct net *net = dev_net(dev); 4044 struct sk_buff *skb; 4045 int err = -ENOBUFS; 4046 u32 seq = 0; 4047 4048 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4049 if (skb == NULL) 4050 goto errout; 4051 4052 if (nlmsg_report(nlh)) 4053 seq = nlmsg_seq(nlh); 4054 else 4055 portid = 0; 4056 4057 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4058 type, portid, seq, change, 0, 0, event, 4059 new_nsid, new_ifindex, -1, flags); 4060 if (err < 0) { 4061 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4062 WARN_ON(err == -EMSGSIZE); 4063 kfree_skb(skb); 4064 goto errout; 4065 } 4066 return skb; 4067 errout: 4068 if (err < 0) 4069 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4070 return NULL; 4071 } 4072 4073 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4074 u32 portid, const struct nlmsghdr *nlh) 4075 { 4076 struct net *net = dev_net(dev); 4077 4078 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4079 } 4080 4081 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4082 unsigned int change, u32 event, 4083 gfp_t flags, int *new_nsid, int new_ifindex, 4084 u32 portid, const struct nlmsghdr *nlh) 4085 { 4086 struct sk_buff *skb; 4087 4088 if (dev->reg_state != NETREG_REGISTERED) 4089 return; 4090 4091 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4092 new_ifindex, portid, nlh); 4093 if (skb) 4094 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4095 } 4096 4097 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4098 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4099 { 4100 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4101 NULL, 0, portid, nlh); 4102 } 4103 4104 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4105 gfp_t flags, int *new_nsid, int new_ifindex) 4106 { 4107 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4108 new_nsid, new_ifindex, 0, NULL); 4109 } 4110 4111 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4112 struct net_device *dev, 4113 u8 *addr, u16 vid, u32 pid, u32 seq, 4114 int type, unsigned int flags, 4115 int nlflags, u16 ndm_state) 4116 { 4117 struct nlmsghdr *nlh; 4118 struct ndmsg *ndm; 4119 4120 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4121 if (!nlh) 4122 return -EMSGSIZE; 4123 4124 ndm = nlmsg_data(nlh); 4125 ndm->ndm_family = AF_BRIDGE; 4126 ndm->ndm_pad1 = 0; 4127 ndm->ndm_pad2 = 0; 4128 ndm->ndm_flags = flags; 4129 ndm->ndm_type = 0; 4130 ndm->ndm_ifindex = dev->ifindex; 4131 ndm->ndm_state = ndm_state; 4132 4133 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) 4134 goto nla_put_failure; 4135 if (vid) 4136 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4137 goto nla_put_failure; 4138 4139 nlmsg_end(skb, nlh); 4140 return 0; 4141 4142 nla_put_failure: 4143 nlmsg_cancel(skb, nlh); 4144 return -EMSGSIZE; 4145 } 4146 4147 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) 4148 { 4149 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4150 nla_total_size(dev->addr_len) + /* NDA_LLADDR */ 4151 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4152 0; 4153 } 4154 4155 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4156 u16 ndm_state) 4157 { 4158 struct net *net = dev_net(dev); 4159 struct 
sk_buff *skb; 4160 int err = -ENOBUFS; 4161 4162 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); 4163 if (!skb) 4164 goto errout; 4165 4166 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4167 0, 0, type, NTF_SELF, 0, ndm_state); 4168 if (err < 0) { 4169 kfree_skb(skb); 4170 goto errout; 4171 } 4172 4173 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4174 return; 4175 errout: 4176 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4177 } 4178 4179 /* 4180 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4181 */ 4182 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4183 struct nlattr *tb[], 4184 struct net_device *dev, 4185 const unsigned char *addr, u16 vid, 4186 u16 flags) 4187 { 4188 int err = -EINVAL; 4189 4190 /* If aging addresses are supported device will need to 4191 * implement its own handler for this. 4192 */ 4193 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4194 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4195 return err; 4196 } 4197 4198 if (tb[NDA_FLAGS_EXT]) { 4199 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4200 return err; 4201 } 4202 4203 if (vid) { 4204 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4205 return err; 4206 } 4207 4208 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4209 err = dev_uc_add_excl(dev, addr); 4210 else if (is_multicast_ether_addr(addr)) 4211 err = dev_mc_add_excl(dev, addr); 4212 4213 /* Only return duplicate errors if NLM_F_EXCL is set */ 4214 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4215 err = 0; 4216 4217 return err; 4218 } 4219 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4220 4221 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4222 struct netlink_ext_ack *extack) 4223 { 4224 u16 vid = 0; 4225 4226 if (vlan_attr) { 4227 if (nla_len(vlan_attr) != sizeof(u16)) { 4228 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4229 return -EINVAL; 4230 } 4231 4232 vid = nla_get_u16(vlan_attr); 4233 4234 if (!vid || vid >= VLAN_VID_MASK) { 4235 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4236 return -EINVAL; 4237 } 4238 } 4239 *p_vid = vid; 4240 return 0; 4241 } 4242 4243 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4244 struct netlink_ext_ack *extack) 4245 { 4246 struct net *net = sock_net(skb->sk); 4247 struct ndmsg *ndm; 4248 struct nlattr *tb[NDA_MAX+1]; 4249 struct net_device *dev; 4250 u8 *addr; 4251 u16 vid; 4252 int err; 4253 4254 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4255 extack); 4256 if (err < 0) 4257 return err; 4258 4259 ndm = nlmsg_data(nlh); 4260 if (ndm->ndm_ifindex == 0) { 4261 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4262 return -EINVAL; 4263 } 4264 4265 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4266 if (dev == NULL) { 4267 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4268 return -ENODEV; 4269 } 4270 4271 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4272 NL_SET_ERR_MSG(extack, "invalid address"); 4273 return -EINVAL; 4274 } 4275 4276 if (dev->type != ARPHRD_ETHER) { 4277 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4278 return -EINVAL; 4279 } 4280 4281 addr = nla_data(tb[NDA_LLADDR]); 4282 4283 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4284 if (err) 4285 return err; 4286 4287 err = -EOPNOTSUPP; 4288 4289 /* Support fdb on master device the net/bridge default case */ 4290 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4291 netif_is_bridge_port(dev)) { 4292 struct 
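/* e.g. a request such as "bridge fdb add <MAC> dev <port> master" (hypothetical port name) takes this branch: it is forwarded to the bridge master's own ndo_fdb_add() */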
net_device *br_dev = netdev_master_upper_dev_get(dev); 4293 const struct net_device_ops *ops = br_dev->netdev_ops; 4294 4295 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4296 nlh->nlmsg_flags, extack); 4297 if (err) 4298 goto out; 4299 else 4300 ndm->ndm_flags &= ~NTF_MASTER; 4301 } 4302 4303 /* Embedded bridge, macvlan, and any other device support */ 4304 if ((ndm->ndm_flags & NTF_SELF)) { 4305 if (dev->netdev_ops->ndo_fdb_add) 4306 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4307 vid, 4308 nlh->nlmsg_flags, 4309 extack); 4310 else 4311 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4312 nlh->nlmsg_flags); 4313 4314 if (!err) { 4315 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4316 ndm->ndm_state); 4317 ndm->ndm_flags &= ~NTF_SELF; 4318 } 4319 } 4320 out: 4321 return err; 4322 } 4323 4324 /* 4325 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4326 */ 4327 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4328 struct nlattr *tb[], 4329 struct net_device *dev, 4330 const unsigned char *addr, u16 vid) 4331 { 4332 int err = -EINVAL; 4333 4334 /* If aging addresses are supported device will need to 4335 * implement its own handler for this. 4336 */ 4337 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4338 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4339 return err; 4340 } 4341 4342 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4343 err = dev_uc_del(dev, addr); 4344 else if (is_multicast_ether_addr(addr)) 4345 err = dev_mc_del(dev, addr); 4346 4347 return err; 4348 } 4349 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4350 4351 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4352 struct netlink_ext_ack *extack) 4353 { 4354 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4355 struct net *net = sock_net(skb->sk); 4356 const struct net_device_ops *ops; 4357 struct ndmsg *ndm; 4358 struct nlattr *tb[NDA_MAX+1]; 4359 struct net_device *dev; 4360 __u8 *addr = NULL; 4361 int err; 4362 u16 vid; 4363 4364 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4365 return -EPERM; 4366 4367 if (!del_bulk) { 4368 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4369 NULL, extack); 4370 } else { 4371 /* For bulk delete, the drivers will parse the message with 4372 * policy. 
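 * (so NDA_* attributes are deliberately left unvalidated here; the
 * ndo_fdb_del_bulk() implementation re-parses the request itself)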
4373 */ 4374 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); 4375 } 4376 if (err < 0) 4377 return err; 4378 4379 ndm = nlmsg_data(nlh); 4380 if (ndm->ndm_ifindex == 0) { 4381 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4382 return -EINVAL; 4383 } 4384 4385 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4386 if (dev == NULL) { 4387 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4388 return -ENODEV; 4389 } 4390 4391 if (!del_bulk) { 4392 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4393 NL_SET_ERR_MSG(extack, "invalid address"); 4394 return -EINVAL; 4395 } 4396 addr = nla_data(tb[NDA_LLADDR]); 4397 4398 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4399 if (err) 4400 return err; 4401 } 4402 4403 if (dev->type != ARPHRD_ETHER) { 4404 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4405 return -EINVAL; 4406 } 4407 4408 err = -EOPNOTSUPP; 4409 4410 /* Support fdb on master device the net/bridge default case */ 4411 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4412 netif_is_bridge_port(dev)) { 4413 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4414 4415 ops = br_dev->netdev_ops; 4416 if (!del_bulk) { 4417 if (ops->ndo_fdb_del) 4418 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4419 } else { 4420 if (ops->ndo_fdb_del_bulk) 4421 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4422 } 4423 4424 if (err) 4425 goto out; 4426 else 4427 ndm->ndm_flags &= ~NTF_MASTER; 4428 } 4429 4430 /* Embedded bridge, macvlan, and any other device support */ 4431 if (ndm->ndm_flags & NTF_SELF) { 4432 ops = dev->netdev_ops; 4433 if (!del_bulk) { 4434 if (ops->ndo_fdb_del) 4435 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4436 else 4437 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4438 } else { 4439 /* in case err was cleared by NTF_MASTER call */ 4440 err = -EOPNOTSUPP; 4441 if (ops->ndo_fdb_del_bulk) 4442 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4443 } 4444 4445 if (!err) { 4446 if (!del_bulk) 4447 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4448 ndm->ndm_state); 4449 ndm->ndm_flags &= ~NTF_SELF; 4450 } 4451 } 4452 out: 4453 return err; 4454 } 4455 4456 static int nlmsg_populate_fdb(struct sk_buff *skb, 4457 struct netlink_callback *cb, 4458 struct net_device *dev, 4459 int *idx, 4460 struct netdev_hw_addr_list *list) 4461 { 4462 struct netdev_hw_addr *ha; 4463 int err; 4464 u32 portid, seq; 4465 4466 portid = NETLINK_CB(cb->skb).portid; 4467 seq = cb->nlh->nlmsg_seq; 4468 4469 list_for_each_entry(ha, &list->list, list) { 4470 if (*idx < cb->args[2]) 4471 goto skip; 4472 4473 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4474 portid, seq, 4475 RTM_NEWNEIGH, NTF_SELF, 4476 NLM_F_MULTI, NUD_PERMANENT); 4477 if (err < 0) 4478 return err; 4479 skip: 4480 *idx += 1; 4481 } 4482 return 0; 4483 } 4484 4485 /** 4486 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4487 * @skb: socket buffer to store message in 4488 * @cb: netlink callback 4489 * @dev: netdevice 4490 * @filter_dev: ignored 4491 * @idx: the number of FDB table entries dumped is added to *@idx 4492 * 4493 * Default netdevice operation to dump the existing unicast address list. 4494 * Returns number of addresses from list put in skb. 
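 * (in practice the function returns 0 or a negative errno; the running
 * entry count is reported back through *idx)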
4495 */ 4496 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4497 struct netlink_callback *cb, 4498 struct net_device *dev, 4499 struct net_device *filter_dev, 4500 int *idx) 4501 { 4502 int err; 4503 4504 if (dev->type != ARPHRD_ETHER) 4505 return -EINVAL; 4506 4507 netif_addr_lock_bh(dev); 4508 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4509 if (err) 4510 goto out; 4511 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4512 out: 4513 netif_addr_unlock_bh(dev); 4514 return err; 4515 } 4516 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4517 4518 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4519 int *br_idx, int *brport_idx, 4520 struct netlink_ext_ack *extack) 4521 { 4522 struct nlattr *tb[NDA_MAX + 1]; 4523 struct ndmsg *ndm; 4524 int err, i; 4525 4526 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4527 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4528 return -EINVAL; 4529 } 4530 4531 ndm = nlmsg_data(nlh); 4532 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4533 ndm->ndm_flags || ndm->ndm_type) { 4534 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4535 return -EINVAL; 4536 } 4537 4538 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4539 NDA_MAX, NULL, extack); 4540 if (err < 0) 4541 return err; 4542 4543 *brport_idx = ndm->ndm_ifindex; 4544 for (i = 0; i <= NDA_MAX; ++i) { 4545 if (!tb[i]) 4546 continue; 4547 4548 switch (i) { 4549 case NDA_IFINDEX: 4550 if (nla_len(tb[i]) != sizeof(u32)) { 4551 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4552 return -EINVAL; 4553 } 4554 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4555 break; 4556 case NDA_MASTER: 4557 if (nla_len(tb[i]) != sizeof(u32)) { 4558 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4559 return -EINVAL; 4560 } 4561 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4562 break; 4563 default: 4564 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4565 return -EINVAL; 4566 } 4567 } 4568 4569 return 0; 4570 } 4571 4572 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4573 int *br_idx, int *brport_idx, 4574 struct netlink_ext_ack *extack) 4575 { 4576 struct nlattr *tb[IFLA_MAX+1]; 4577 int err; 4578 4579 /* A hack to preserve kernel<->userspace interface. 4580 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4581 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4582 * So, check for ndmsg with an optional u32 attribute (not used here). 4583 * Fortunately these sizes don't conflict with the size of ifinfomsg 4584 * with an optional attribute. 
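 * E.g. a request whose payload is exactly sizeof(struct ndmsg), with or
 * without a single u32 attribute, is taken as the legacy form and skips
 * the ifinfomsg parse below.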
4585 */ 4586 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4587 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4588 nla_attr_size(sizeof(u32)))) { 4589 struct ifinfomsg *ifm; 4590 4591 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4592 tb, IFLA_MAX, ifla_policy, 4593 extack); 4594 if (err < 0) { 4595 return -EINVAL; 4596 } else if (err == 0) { 4597 if (tb[IFLA_MASTER]) 4598 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4599 } 4600 4601 ifm = nlmsg_data(nlh); 4602 *brport_idx = ifm->ifi_index; 4603 } 4604 return 0; 4605 } 4606 4607 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4608 { 4609 struct net_device *dev; 4610 struct net_device *br_dev = NULL; 4611 const struct net_device_ops *ops = NULL; 4612 const struct net_device_ops *cops = NULL; 4613 struct net *net = sock_net(skb->sk); 4614 struct hlist_head *head; 4615 int brport_idx = 0; 4616 int br_idx = 0; 4617 int h, s_h; 4618 int idx = 0, s_idx; 4619 int err = 0; 4620 int fidx = 0; 4621 4622 if (cb->strict_check) 4623 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4624 cb->extack); 4625 else 4626 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4627 cb->extack); 4628 if (err < 0) 4629 return err; 4630 4631 if (br_idx) { 4632 br_dev = __dev_get_by_index(net, br_idx); 4633 if (!br_dev) 4634 return -ENODEV; 4635 4636 ops = br_dev->netdev_ops; 4637 } 4638 4639 s_h = cb->args[0]; 4640 s_idx = cb->args[1]; 4641 4642 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4643 idx = 0; 4644 head = &net->dev_index_head[h]; 4645 hlist_for_each_entry(dev, head, index_hlist) { 4646 4647 if (brport_idx && (dev->ifindex != brport_idx)) 4648 continue; 4649 4650 if (!br_idx) { /* user did not specify a specific bridge */ 4651 if (netif_is_bridge_port(dev)) { 4652 br_dev = netdev_master_upper_dev_get(dev); 4653 cops = br_dev->netdev_ops; 4654 } 4655 } else { 4656 if (dev != br_dev && 4657 !netif_is_bridge_port(dev)) 4658 continue; 4659 4660 if (br_dev != netdev_master_upper_dev_get(dev) && 4661 !netif_is_bridge_master(dev)) 4662 continue; 4663 cops = ops; 4664 } 4665 4666 if (idx < s_idx) 4667 goto cont; 4668 4669 if (netif_is_bridge_port(dev)) { 4670 if (cops && cops->ndo_fdb_dump) { 4671 err = cops->ndo_fdb_dump(skb, cb, 4672 br_dev, dev, 4673 &fidx); 4674 if (err == -EMSGSIZE) 4675 goto out; 4676 } 4677 } 4678 4679 if (dev->netdev_ops->ndo_fdb_dump) 4680 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4681 dev, NULL, 4682 &fidx); 4683 else 4684 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4685 &fidx); 4686 if (err == -EMSGSIZE) 4687 goto out; 4688 4689 cops = NULL; 4690 4691 /* reset fdb offset to 0 for rest of the interfaces */ 4692 cb->args[2] = 0; 4693 fidx = 0; 4694 cont: 4695 idx++; 4696 } 4697 } 4698 4699 out: 4700 cb->args[0] = h; 4701 cb->args[1] = idx; 4702 cb->args[2] = fidx; 4703 4704 return skb->len; 4705 } 4706 4707 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4708 struct nlattr **tb, u8 *ndm_flags, 4709 int *br_idx, int *brport_idx, u8 **addr, 4710 u16 *vid, struct netlink_ext_ack *extack) 4711 { 4712 struct ndmsg *ndm; 4713 int err, i; 4714 4715 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4716 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4717 return -EINVAL; 4718 } 4719 4720 ndm = nlmsg_data(nlh); 4721 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4722 ndm->ndm_type) { 4723 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4724 return -EINVAL; 4725 } 4726 4727 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4728 
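/* only NTF_MASTER and NTF_SELF make sense for a get request */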
NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4729 return -EINVAL; 4730 } 4731 4732 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4733 NDA_MAX, nda_policy, extack); 4734 if (err < 0) 4735 return err; 4736 4737 *ndm_flags = ndm->ndm_flags; 4738 *brport_idx = ndm->ndm_ifindex; 4739 for (i = 0; i <= NDA_MAX; ++i) { 4740 if (!tb[i]) 4741 continue; 4742 4743 switch (i) { 4744 case NDA_MASTER: 4745 *br_idx = nla_get_u32(tb[i]); 4746 break; 4747 case NDA_LLADDR: 4748 if (nla_len(tb[i]) != ETH_ALEN) { 4749 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4750 return -EINVAL; 4751 } 4752 *addr = nla_data(tb[i]); 4753 break; 4754 case NDA_VLAN: 4755 err = fdb_vid_parse(tb[i], vid, extack); 4756 if (err) 4757 return err; 4758 break; 4759 case NDA_VNI: 4760 break; 4761 default: 4762 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4763 return -EINVAL; 4764 } 4765 } 4766 4767 return 0; 4768 } 4769 4770 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4771 struct netlink_ext_ack *extack) 4772 { 4773 struct net_device *dev = NULL, *br_dev = NULL; 4774 const struct net_device_ops *ops = NULL; 4775 struct net *net = sock_net(in_skb->sk); 4776 struct nlattr *tb[NDA_MAX + 1]; 4777 struct sk_buff *skb; 4778 int brport_idx = 0; 4779 u8 ndm_flags = 0; 4780 int br_idx = 0; 4781 u8 *addr = NULL; 4782 u16 vid = 0; 4783 int err; 4784 4785 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4786 &brport_idx, &addr, &vid, extack); 4787 if (err < 0) 4788 return err; 4789 4790 if (!addr) { 4791 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4792 return -EINVAL; 4793 } 4794 4795 if (brport_idx) { 4796 dev = __dev_get_by_index(net, brport_idx); 4797 if (!dev) { 4798 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4799 return -ENODEV; 4800 } 4801 } 4802 4803 if (br_idx) { 4804 if (dev) { 4805 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4806 return -EINVAL; 4807 } 4808 4809 br_dev = __dev_get_by_index(net, br_idx); 4810 if (!br_dev) { 4811 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4812 return -EINVAL; 4813 } 4814 ops = br_dev->netdev_ops; 4815 } 4816 4817 if (dev) { 4818 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4819 if (!netif_is_bridge_port(dev)) { 4820 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4821 return -EINVAL; 4822 } 4823 br_dev = netdev_master_upper_dev_get(dev); 4824 if (!br_dev) { 4825 NL_SET_ERR_MSG(extack, "Master of device not found"); 4826 return -EINVAL; 4827 } 4828 ops = br_dev->netdev_ops; 4829 } else { 4830 if (!(ndm_flags & NTF_SELF)) { 4831 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4832 return -EINVAL; 4833 } 4834 ops = dev->netdev_ops; 4835 } 4836 } 4837 4838 if (!br_dev && !dev) { 4839 NL_SET_ERR_MSG(extack, "No device specified"); 4840 return -ENODEV; 4841 } 4842 4843 if (!ops || !ops->ndo_fdb_get) { 4844 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4845 return -EOPNOTSUPP; 4846 } 4847 4848 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4849 if (!skb) 4850 return -ENOBUFS; 4851 4852 if (br_dev) 4853 dev = br_dev; 4854 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4855 NETLINK_CB(in_skb).portid, 4856 nlh->nlmsg_seq, extack); 4857 if (err) 4858 goto out; 4859 4860 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4861 out: 4862 kfree_skb(skb); 4863 return err; 4864 } 4865 4866 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4867 unsigned int attrnum, unsigned int flag) 
4868 { 4869 if (mask & flag) 4870 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4871 return 0; 4872 } 4873 4874 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4875 struct net_device *dev, u16 mode, 4876 u32 flags, u32 mask, int nlflags, 4877 u32 filter_mask, 4878 int (*vlan_fill)(struct sk_buff *skb, 4879 struct net_device *dev, 4880 u32 filter_mask)) 4881 { 4882 struct nlmsghdr *nlh; 4883 struct ifinfomsg *ifm; 4884 struct nlattr *br_afspec; 4885 struct nlattr *protinfo; 4886 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4887 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4888 int err = 0; 4889 4890 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4891 if (nlh == NULL) 4892 return -EMSGSIZE; 4893 4894 ifm = nlmsg_data(nlh); 4895 ifm->ifi_family = AF_BRIDGE; 4896 ifm->__ifi_pad = 0; 4897 ifm->ifi_type = dev->type; 4898 ifm->ifi_index = dev->ifindex; 4899 ifm->ifi_flags = dev_get_flags(dev); 4900 ifm->ifi_change = 0; 4901 4902 4903 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4904 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4905 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4906 (br_dev && 4907 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4908 (dev->addr_len && 4909 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4910 (dev->ifindex != dev_get_iflink(dev) && 4911 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4912 goto nla_put_failure; 4913 4914 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4915 if (!br_afspec) 4916 goto nla_put_failure; 4917 4918 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4919 nla_nest_cancel(skb, br_afspec); 4920 goto nla_put_failure; 4921 } 4922 4923 if (mode != BRIDGE_MODE_UNDEF) { 4924 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4925 nla_nest_cancel(skb, br_afspec); 4926 goto nla_put_failure; 4927 } 4928 } 4929 if (vlan_fill) { 4930 err = vlan_fill(skb, dev, filter_mask); 4931 if (err) { 4932 nla_nest_cancel(skb, br_afspec); 4933 goto nla_put_failure; 4934 } 4935 } 4936 nla_nest_end(skb, br_afspec); 4937 4938 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4939 if (!protinfo) 4940 goto nla_put_failure; 4941 4942 if (brport_nla_put_flag(skb, flags, mask, 4943 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4944 brport_nla_put_flag(skb, flags, mask, 4945 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4946 brport_nla_put_flag(skb, flags, mask, 4947 IFLA_BRPORT_FAST_LEAVE, 4948 BR_MULTICAST_FAST_LEAVE) || 4949 brport_nla_put_flag(skb, flags, mask, 4950 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4951 brport_nla_put_flag(skb, flags, mask, 4952 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4953 brport_nla_put_flag(skb, flags, mask, 4954 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4955 brport_nla_put_flag(skb, flags, mask, 4956 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4957 brport_nla_put_flag(skb, flags, mask, 4958 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4959 brport_nla_put_flag(skb, flags, mask, 4960 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4961 brport_nla_put_flag(skb, flags, mask, 4962 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4963 nla_nest_cancel(skb, protinfo); 4964 goto nla_put_failure; 4965 } 4966 4967 nla_nest_end(skb, protinfo); 4968 4969 nlmsg_end(skb, nlh); 4970 return 0; 4971 nla_put_failure: 4972 nlmsg_cancel(skb, nlh); 4973 return err ? 
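/* a vlan_fill() error is propagated as-is; every other bail-out here means the skb ran out of room */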
err : -EMSGSIZE; 4974 } 4975 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4976 4977 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4978 bool strict_check, u32 *filter_mask, 4979 struct netlink_ext_ack *extack) 4980 { 4981 struct nlattr *tb[IFLA_MAX+1]; 4982 int err, i; 4983 4984 if (strict_check) { 4985 struct ifinfomsg *ifm; 4986 4987 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4988 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4989 return -EINVAL; 4990 } 4991 4992 ifm = nlmsg_data(nlh); 4993 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4994 ifm->ifi_change || ifm->ifi_index) { 4995 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4996 return -EINVAL; 4997 } 4998 4999 err = nlmsg_parse_deprecated_strict(nlh, 5000 sizeof(struct ifinfomsg), 5001 tb, IFLA_MAX, ifla_policy, 5002 extack); 5003 } else { 5004 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 5005 tb, IFLA_MAX, ifla_policy, 5006 extack); 5007 } 5008 if (err < 0) 5009 return err; 5010 5011 /* new attributes should only be added with strict checking */ 5012 for (i = 0; i <= IFLA_MAX; ++i) { 5013 if (!tb[i]) 5014 continue; 5015 5016 switch (i) { 5017 case IFLA_EXT_MASK: 5018 *filter_mask = nla_get_u32(tb[i]); 5019 break; 5020 default: 5021 if (strict_check) { 5022 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 5023 return -EINVAL; 5024 } 5025 } 5026 } 5027 5028 return 0; 5029 } 5030 5031 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5032 { 5033 const struct nlmsghdr *nlh = cb->nlh; 5034 struct net *net = sock_net(skb->sk); 5035 struct net_device *dev; 5036 int idx = 0; 5037 u32 portid = NETLINK_CB(cb->skb).portid; 5038 u32 seq = nlh->nlmsg_seq; 5039 u32 filter_mask = 0; 5040 int err; 5041 5042 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5043 cb->extack); 5044 if (err < 0 && cb->strict_check) 5045 return err; 5046 5047 rcu_read_lock(); 5048 for_each_netdev_rcu(net, dev) { 5049 const struct net_device_ops *ops = dev->netdev_ops; 5050 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5051 5052 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5053 if (idx >= cb->args[0]) { 5054 err = br_dev->netdev_ops->ndo_bridge_getlink( 5055 skb, portid, seq, dev, 5056 filter_mask, NLM_F_MULTI); 5057 if (err < 0 && err != -EOPNOTSUPP) { 5058 if (likely(skb->len)) 5059 break; 5060 5061 goto out_err; 5062 } 5063 } 5064 idx++; 5065 } 5066 5067 if (ops->ndo_bridge_getlink) { 5068 if (idx >= cb->args[0]) { 5069 err = ops->ndo_bridge_getlink(skb, portid, 5070 seq, dev, 5071 filter_mask, 5072 NLM_F_MULTI); 5073 if (err < 0 && err != -EOPNOTSUPP) { 5074 if (likely(skb->len)) 5075 break; 5076 5077 goto out_err; 5078 } 5079 } 5080 idx++; 5081 } 5082 } 5083 err = skb->len; 5084 out_err: 5085 rcu_read_unlock(); 5086 cb->args[0] = idx; 5087 5088 return err; 5089 } 5090 5091 static inline size_t bridge_nlmsg_size(void) 5092 { 5093 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5094 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5095 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5096 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5097 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5098 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5099 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5100 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5101 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5102 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5103 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5104 } 5105 5106 static int rtnl_bridge_notify(struct net_device *dev) 5107 { 5108 struct net *net = dev_net(dev); 5109 struct sk_buff *skb; 5110 int err = -EOPNOTSUPP; 5111 5112 if (!dev->netdev_ops->ndo_bridge_getlink) 5113 return 0; 5114 5115 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5116 if (!skb) { 5117 err = -ENOMEM; 5118 goto errout; 5119 } 5120 5121 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5122 if (err < 0) 5123 goto errout; 5124 5125 /* Notification info is only filled for bridge ports, not the bridge 5126 * device itself. Therefore, a zero notification length is valid and 5127 * should not result in an error. 5128 */ 5129 if (!skb->len) 5130 goto errout; 5131 5132 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5133 return 0; 5134 errout: 5135 WARN_ON(err == -EMSGSIZE); 5136 kfree_skb(skb); 5137 if (err) 5138 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5139 return err; 5140 } 5141 5142 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5143 struct netlink_ext_ack *extack) 5144 { 5145 struct net *net = sock_net(skb->sk); 5146 struct ifinfomsg *ifm; 5147 struct net_device *dev; 5148 struct nlattr *br_spec, *attr, *br_flags_attr = NULL; 5149 int rem, err = -EOPNOTSUPP; 5150 u16 flags = 0; 5151 5152 if (nlmsg_len(nlh) < sizeof(*ifm)) 5153 return -EINVAL; 5154 5155 ifm = nlmsg_data(nlh); 5156 if (ifm->ifi_family != AF_BRIDGE) 5157 return -EPFNOSUPPORT; 5158 5159 dev = __dev_get_by_index(net, ifm->ifi_index); 5160 if (!dev) { 5161 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5162 return -ENODEV; 5163 } 5164 5165 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5166 if (br_spec) { 5167 nla_for_each_nested(attr, br_spec, rem) { 5168 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { 5169 if (nla_len(attr) < sizeof(flags)) 5170 return -EINVAL; 5171 5172 br_flags_attr = attr; 5173 flags = nla_get_u16(attr); 5174 } 5175 5176 if (nla_type(attr) == IFLA_BRIDGE_MODE) { 5177 if (nla_len(attr) < sizeof(u16)) 5178 return -EINVAL; 5179 } 5180 } 5181 } 5182 5183 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5184 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5185 5186 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5187 err = -EOPNOTSUPP; 5188 goto out; 5189 } 5190 5191 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5192 extack); 5193 if (err) 5194 goto out; 5195 5196 flags &= ~BRIDGE_FLAGS_MASTER; 5197 } 5198 5199 if ((flags & BRIDGE_FLAGS_SELF)) { 5200 if (!dev->netdev_ops->ndo_bridge_setlink) 5201 err = -EOPNOTSUPP; 5202 else 5203 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5204 flags, 5205 extack); 5206 if (!err) { 5207 flags &= ~BRIDGE_FLAGS_SELF; 5208 5209 /* Generate event to notify upper layer of bridge 5210 * change 5211 */ 5212 err = rtnl_bridge_notify(dev); 5213 } 5214 } 5215 5216 if (br_flags_attr) 5217 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); 5218 out: 5219 return err; 5220 } 5221 5222 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5223 struct netlink_ext_ack *extack) 5224 { 5225 struct net *net = sock_net(skb->sk); 5226 struct ifinfomsg *ifm; 5227 struct net_device *dev; 5228 struct nlattr *br_spec, *attr = NULL; 5229 int rem, err = -EOPNOTSUPP; 5230 u16 flags = 0; 5231 bool have_flags = false; 5232 5233 if (nlmsg_len(nlh) < sizeof(*ifm)) 5234 return -EINVAL; 5235 5236 ifm = nlmsg_data(nlh); 5237 if (ifm->ifi_family != AF_BRIDGE) 5238 return 
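/* only AF_BRIDGE requests are serviced here */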
-EPFNOSUPPORT; 5239 5240 dev = __dev_get_by_index(net, ifm->ifi_index); 5241 if (!dev) { 5242 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5243 return -ENODEV; 5244 } 5245 5246 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5247 if (br_spec) { 5248 nla_for_each_nested(attr, br_spec, rem) { 5249 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5250 if (nla_len(attr) < sizeof(flags)) 5251 return -EINVAL; 5252 5253 have_flags = true; 5254 flags = nla_get_u16(attr); 5255 break; 5256 } 5257 } 5258 } 5259 5260 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5261 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5262 5263 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5264 err = -EOPNOTSUPP; 5265 goto out; 5266 } 5267 5268 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5269 if (err) 5270 goto out; 5271 5272 flags &= ~BRIDGE_FLAGS_MASTER; 5273 } 5274 5275 if ((flags & BRIDGE_FLAGS_SELF)) { 5276 if (!dev->netdev_ops->ndo_bridge_dellink) 5277 err = -EOPNOTSUPP; 5278 else 5279 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5280 flags); 5281 5282 if (!err) { 5283 flags &= ~BRIDGE_FLAGS_SELF; 5284 5285 /* Generate event to notify upper layer of bridge 5286 * change 5287 */ 5288 err = rtnl_bridge_notify(dev); 5289 } 5290 } 5291 5292 if (have_flags) 5293 memcpy(nla_data(attr), &flags, sizeof(flags)); 5294 out: 5295 return err; 5296 } 5297 5298 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5299 { 5300 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5301 (!idxattr || idxattr == attrid); 5302 } 5303 5304 static bool 5305 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5306 { 5307 return dev->netdev_ops && 5308 dev->netdev_ops->ndo_has_offload_stats && 5309 dev->netdev_ops->ndo_get_offload_stats && 5310 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5311 } 5312 5313 static unsigned int 5314 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5315 { 5316 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5317 sizeof(struct rtnl_link_stats64) : 0; 5318 } 5319 5320 static int 5321 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5322 struct sk_buff *skb) 5323 { 5324 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5325 struct nlattr *attr = NULL; 5326 void *attr_data; 5327 int err; 5328 5329 if (!size) 5330 return -ENODATA; 5331 5332 attr = nla_reserve_64bit(skb, attr_id, size, 5333 IFLA_OFFLOAD_XSTATS_UNSPEC); 5334 if (!attr) 5335 return -EMSGSIZE; 5336 5337 attr_data = nla_data(attr); 5338 memset(attr_data, 0, size); 5339 5340 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5341 if (err) 5342 return err; 5343 5344 return 0; 5345 } 5346 5347 static unsigned int 5348 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5349 enum netdev_offload_xstats_type type) 5350 { 5351 bool enabled = netdev_offload_xstats_enabled(dev, type); 5352 5353 return enabled ? 
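/* a stats type that is not enabled contributes no payload */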
sizeof(struct rtnl_hw_stats64) : 0; 5354 } 5355 5356 struct rtnl_offload_xstats_request_used { 5357 bool request; 5358 bool used; 5359 }; 5360 5361 static int 5362 rtnl_offload_xstats_get_stats(struct net_device *dev, 5363 enum netdev_offload_xstats_type type, 5364 struct rtnl_offload_xstats_request_used *ru, 5365 struct rtnl_hw_stats64 *stats, 5366 struct netlink_ext_ack *extack) 5367 { 5368 bool request; 5369 bool used; 5370 int err; 5371 5372 request = netdev_offload_xstats_enabled(dev, type); 5373 if (!request) { 5374 used = false; 5375 goto out; 5376 } 5377 5378 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5379 if (err) 5380 return err; 5381 5382 out: 5383 if (ru) { 5384 ru->request = request; 5385 ru->used = used; 5386 } 5387 return 0; 5388 } 5389 5390 static int 5391 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5392 struct rtnl_offload_xstats_request_used *ru) 5393 { 5394 struct nlattr *nest; 5395 5396 nest = nla_nest_start(skb, attr_id); 5397 if (!nest) 5398 return -EMSGSIZE; 5399 5400 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5401 goto nla_put_failure; 5402 5403 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5404 goto nla_put_failure; 5405 5406 nla_nest_end(skb, nest); 5407 return 0; 5408 5409 nla_put_failure: 5410 nla_nest_cancel(skb, nest); 5411 return -EMSGSIZE; 5412 } 5413 5414 static int 5415 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5416 struct netlink_ext_ack *extack) 5417 { 5418 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5419 struct rtnl_offload_xstats_request_used ru_l3; 5420 struct nlattr *nest; 5421 int err; 5422 5423 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5424 if (err) 5425 return err; 5426 5427 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5428 if (!nest) 5429 return -EMSGSIZE; 5430 5431 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5432 IFLA_OFFLOAD_XSTATS_L3_STATS, 5433 &ru_l3)) 5434 goto nla_put_failure; 5435 5436 nla_nest_end(skb, nest); 5437 return 0; 5438 5439 nla_put_failure: 5440 nla_nest_cancel(skb, nest); 5441 return -EMSGSIZE; 5442 } 5443 5444 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5445 int *prividx, u32 off_filter_mask, 5446 struct netlink_ext_ack *extack) 5447 { 5448 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5449 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5450 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5451 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5452 bool have_data = false; 5453 int err; 5454 5455 if (*prividx <= attr_id_cpu_hit && 5456 (off_filter_mask & 5457 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5458 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5459 if (!err) { 5460 have_data = true; 5461 } else if (err != -ENODATA) { 5462 *prividx = attr_id_cpu_hit; 5463 return err; 5464 } 5465 } 5466 5467 if (*prividx <= attr_id_hw_s_info && 5468 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5469 *prividx = attr_id_hw_s_info; 5470 5471 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5472 if (err) 5473 return err; 5474 5475 have_data = true; 5476 *prividx = 0; 5477 } 5478 5479 if (*prividx <= attr_id_l3_stats && 5480 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5481 unsigned int size_l3; 5482 struct nlattr *attr; 5483 5484 *prividx = attr_id_l3_stats; 5485 5486 size_l3 = 
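/* a zero size means L3 stats are not enabled on this device */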
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5487 if (!size_l3) 5488 goto skip_l3_stats; 5489 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5490 IFLA_OFFLOAD_XSTATS_UNSPEC); 5491 if (!attr) 5492 return -EMSGSIZE; 5493 5494 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5495 nla_data(attr), extack); 5496 if (err) 5497 return err; 5498 5499 have_data = true; 5500 skip_l3_stats: 5501 *prividx = 0; 5502 } 5503 5504 if (!have_data) 5505 return -ENODATA; 5506 5507 *prividx = 0; 5508 return 0; 5509 } 5510 5511 static unsigned int 5512 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5513 enum netdev_offload_xstats_type type) 5514 { 5515 return nla_total_size(0) + 5516 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5517 nla_total_size(sizeof(u8)) + 5518 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5519 nla_total_size(sizeof(u8)) + 5520 0; 5521 } 5522 5523 static unsigned int 5524 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5525 { 5526 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5527 5528 return nla_total_size(0) + 5529 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5530 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5531 0; 5532 } 5533 5534 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5535 u32 off_filter_mask) 5536 { 5537 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5538 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5539 int nla_size = 0; 5540 int size; 5541 5542 if (off_filter_mask & 5543 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5544 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5545 nla_size += nla_total_size_64bit(size); 5546 } 5547 5548 if (off_filter_mask & 5549 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5550 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5551 5552 if (off_filter_mask & 5553 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5554 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5555 nla_size += nla_total_size_64bit(size); 5556 } 5557 5558 if (nla_size != 0) 5559 nla_size += nla_total_size(0); 5560 5561 return nla_size; 5562 } 5563 5564 struct rtnl_stats_dump_filters { 5565 /* mask[0] filters outer attributes. Then individual nests have their 5566 * filtering mask at the index of the nested attribute. 
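 * For example, mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] narrows the offload
 * nest to the attributes requested via IFLA_STATS_GET_FILTERS.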
5567 */ 5568 u32 mask[IFLA_STATS_MAX + 1]; 5569 }; 5570 5571 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5572 int type, u32 pid, u32 seq, u32 change, 5573 unsigned int flags, 5574 const struct rtnl_stats_dump_filters *filters, 5575 int *idxattr, int *prividx, 5576 struct netlink_ext_ack *extack) 5577 { 5578 unsigned int filter_mask = filters->mask[0]; 5579 struct if_stats_msg *ifsm; 5580 struct nlmsghdr *nlh; 5581 struct nlattr *attr; 5582 int s_prividx = *prividx; 5583 int err; 5584 5585 ASSERT_RTNL(); 5586 5587 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5588 if (!nlh) 5589 return -EMSGSIZE; 5590 5591 ifsm = nlmsg_data(nlh); 5592 ifsm->family = PF_UNSPEC; 5593 ifsm->pad1 = 0; 5594 ifsm->pad2 = 0; 5595 ifsm->ifindex = dev->ifindex; 5596 ifsm->filter_mask = filter_mask; 5597 5598 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5599 struct rtnl_link_stats64 *sp; 5600 5601 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5602 sizeof(struct rtnl_link_stats64), 5603 IFLA_STATS_UNSPEC); 5604 if (!attr) { 5605 err = -EMSGSIZE; 5606 goto nla_put_failure; 5607 } 5608 5609 sp = nla_data(attr); 5610 dev_get_stats(dev, sp); 5611 } 5612 5613 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5614 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5615 5616 if (ops && ops->fill_linkxstats) { 5617 *idxattr = IFLA_STATS_LINK_XSTATS; 5618 attr = nla_nest_start_noflag(skb, 5619 IFLA_STATS_LINK_XSTATS); 5620 if (!attr) { 5621 err = -EMSGSIZE; 5622 goto nla_put_failure; 5623 } 5624 5625 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5626 nla_nest_end(skb, attr); 5627 if (err) 5628 goto nla_put_failure; 5629 *idxattr = 0; 5630 } 5631 } 5632 5633 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5634 *idxattr)) { 5635 const struct rtnl_link_ops *ops = NULL; 5636 const struct net_device *master; 5637 5638 master = netdev_master_upper_dev_get(dev); 5639 if (master) 5640 ops = master->rtnl_link_ops; 5641 if (ops && ops->fill_linkxstats) { 5642 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5643 attr = nla_nest_start_noflag(skb, 5644 IFLA_STATS_LINK_XSTATS_SLAVE); 5645 if (!attr) { 5646 err = -EMSGSIZE; 5647 goto nla_put_failure; 5648 } 5649 5650 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5651 nla_nest_end(skb, attr); 5652 if (err) 5653 goto nla_put_failure; 5654 *idxattr = 0; 5655 } 5656 } 5657 5658 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5659 *idxattr)) { 5660 u32 off_filter_mask; 5661 5662 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5663 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5664 attr = nla_nest_start_noflag(skb, 5665 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5666 if (!attr) { 5667 err = -EMSGSIZE; 5668 goto nla_put_failure; 5669 } 5670 5671 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5672 off_filter_mask, extack); 5673 if (err == -ENODATA) 5674 nla_nest_cancel(skb, attr); 5675 else 5676 nla_nest_end(skb, attr); 5677 5678 if (err && err != -ENODATA) 5679 goto nla_put_failure; 5680 *idxattr = 0; 5681 } 5682 5683 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5684 struct rtnl_af_ops *af_ops; 5685 5686 *idxattr = IFLA_STATS_AF_SPEC; 5687 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5688 if (!attr) { 5689 err = -EMSGSIZE; 5690 goto nla_put_failure; 5691 } 5692 5693 rcu_read_lock(); 5694 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5695 if (af_ops->fill_stats_af) { 5696 struct nlattr *af; 5697 5698 af = 
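/* one nest per address family, keyed by af_ops->family */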
nla_nest_start_noflag(skb, 5699 af_ops->family); 5700 if (!af) { 5701 rcu_read_unlock(); 5702 err = -EMSGSIZE; 5703 goto nla_put_failure; 5704 } 5705 err = af_ops->fill_stats_af(skb, dev); 5706 5707 if (err == -ENODATA) { 5708 nla_nest_cancel(skb, af); 5709 } else if (err < 0) { 5710 rcu_read_unlock(); 5711 goto nla_put_failure; 5712 } 5713 5714 nla_nest_end(skb, af); 5715 } 5716 } 5717 rcu_read_unlock(); 5718 5719 nla_nest_end(skb, attr); 5720 5721 *idxattr = 0; 5722 } 5723 5724 nlmsg_end(skb, nlh); 5725 5726 return 0; 5727 5728 nla_put_failure: 5729 /* not a multi message or no progress mean a real error */ 5730 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5731 nlmsg_cancel(skb, nlh); 5732 else 5733 nlmsg_end(skb, nlh); 5734 5735 return err; 5736 } 5737 5738 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5739 const struct rtnl_stats_dump_filters *filters) 5740 { 5741 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5742 unsigned int filter_mask = filters->mask[0]; 5743 5744 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5745 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5746 5747 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5748 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5749 int attr = IFLA_STATS_LINK_XSTATS; 5750 5751 if (ops && ops->get_linkxstats_size) { 5752 size += nla_total_size(ops->get_linkxstats_size(dev, 5753 attr)); 5754 /* for IFLA_STATS_LINK_XSTATS */ 5755 size += nla_total_size(0); 5756 } 5757 } 5758 5759 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5760 struct net_device *_dev = (struct net_device *)dev; 5761 const struct rtnl_link_ops *ops = NULL; 5762 const struct net_device *master; 5763 5764 /* netdev_master_upper_dev_get can't take const */ 5765 master = netdev_master_upper_dev_get(_dev); 5766 if (master) 5767 ops = master->rtnl_link_ops; 5768 if (ops && ops->get_linkxstats_size) { 5769 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5770 5771 size += nla_total_size(ops->get_linkxstats_size(dev, 5772 attr)); 5773 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5774 size += nla_total_size(0); 5775 } 5776 } 5777 5778 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5779 u32 off_filter_mask; 5780 5781 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5782 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5783 } 5784 5785 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5786 struct rtnl_af_ops *af_ops; 5787 5788 /* for IFLA_STATS_AF_SPEC */ 5789 size += nla_total_size(0); 5790 5791 rcu_read_lock(); 5792 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5793 if (af_ops->get_stats_af_size) { 5794 size += nla_total_size( 5795 af_ops->get_stats_af_size(dev)); 5796 5797 /* for AF_* */ 5798 size += nla_total_size(0); 5799 } 5800 } 5801 rcu_read_unlock(); 5802 } 5803 5804 return size; 5805 } 5806 5807 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5808 5809 static const struct nla_policy 5810 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5811 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5812 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5813 }; 5814 5815 static const struct nla_policy 5816 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5817 [IFLA_STATS_GET_FILTERS] = 5818 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5819 }; 5820 5821 static const struct nla_policy 5822 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5823 
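/* the single settable attribute: a boolean toggling HW L3 stats collection */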
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5824 }; 5825 5826 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5827 struct rtnl_stats_dump_filters *filters, 5828 struct netlink_ext_ack *extack) 5829 { 5830 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5831 int err; 5832 int at; 5833 5834 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5835 rtnl_stats_get_policy_filters, extack); 5836 if (err < 0) 5837 return err; 5838 5839 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5840 if (tb[at]) { 5841 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5842 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5843 return -EINVAL; 5844 } 5845 filters->mask[at] = nla_get_u32(tb[at]); 5846 } 5847 } 5848 5849 return 0; 5850 } 5851 5852 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5853 u32 filter_mask, 5854 struct rtnl_stats_dump_filters *filters, 5855 struct netlink_ext_ack *extack) 5856 { 5857 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5858 int err; 5859 int i; 5860 5861 filters->mask[0] = filter_mask; 5862 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5863 filters->mask[i] = -1U; 5864 5865 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5866 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5867 if (err < 0) 5868 return err; 5869 5870 if (tb[IFLA_STATS_GET_FILTERS]) { 5871 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5872 filters, extack); 5873 if (err) 5874 return err; 5875 } 5876 5877 return 0; 5878 } 5879 5880 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5881 bool is_dump, struct netlink_ext_ack *extack) 5882 { 5883 struct if_stats_msg *ifsm; 5884 5885 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5886 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5887 return -EINVAL; 5888 } 5889 5890 if (!strict_check) 5891 return 0; 5892 5893 ifsm = nlmsg_data(nlh); 5894 5895 /* only requests using strict checks can pass data to influence 5896 * the dump. The legacy exception is filter_mask. 
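 * (filter_mask predates strict checking and is therefore honoured either way)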
5897 */ 5898 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5899 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5900 return -EINVAL; 5901 } 5902 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5903 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5904 return -EINVAL; 5905 } 5906 5907 return 0; 5908 } 5909 5910 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5911 struct netlink_ext_ack *extack) 5912 { 5913 struct rtnl_stats_dump_filters filters; 5914 struct net *net = sock_net(skb->sk); 5915 struct net_device *dev = NULL; 5916 int idxattr = 0, prividx = 0; 5917 struct if_stats_msg *ifsm; 5918 struct sk_buff *nskb; 5919 int err; 5920 5921 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5922 false, extack); 5923 if (err) 5924 return err; 5925 5926 ifsm = nlmsg_data(nlh); 5927 if (ifsm->ifindex > 0) 5928 dev = __dev_get_by_index(net, ifsm->ifindex); 5929 else 5930 return -EINVAL; 5931 5932 if (!dev) 5933 return -ENODEV; 5934 5935 if (!ifsm->filter_mask) { 5936 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5937 return -EINVAL; 5938 } 5939 5940 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5941 if (err) 5942 return err; 5943 5944 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5945 if (!nskb) 5946 return -ENOBUFS; 5947 5948 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5949 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5950 0, &filters, &idxattr, &prividx, extack); 5951 if (err < 0) { 5952 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5953 WARN_ON(err == -EMSGSIZE); 5954 kfree_skb(nskb); 5955 } else { 5956 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5957 } 5958 5959 return err; 5960 } 5961 5962 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5963 { 5964 struct netlink_ext_ack *extack = cb->extack; 5965 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5966 struct rtnl_stats_dump_filters filters; 5967 struct net *net = sock_net(skb->sk); 5968 unsigned int flags = NLM_F_MULTI; 5969 struct if_stats_msg *ifsm; 5970 struct hlist_head *head; 5971 struct net_device *dev; 5972 int idx = 0; 5973 5974 s_h = cb->args[0]; 5975 s_idx = cb->args[1]; 5976 s_idxattr = cb->args[2]; 5977 s_prividx = cb->args[3]; 5978 5979 cb->seq = net->dev_base_seq; 5980 5981 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5982 if (err) 5983 return err; 5984 5985 ifsm = nlmsg_data(cb->nlh); 5986 if (!ifsm->filter_mask) { 5987 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5988 return -EINVAL; 5989 } 5990 5991 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5992 extack); 5993 if (err) 5994 return err; 5995 5996 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5997 idx = 0; 5998 head = &net->dev_index_head[h]; 5999 hlist_for_each_entry(dev, head, index_hlist) { 6000 if (idx < s_idx) 6001 goto cont; 6002 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 6003 NETLINK_CB(cb->skb).portid, 6004 cb->nlh->nlmsg_seq, 0, 6005 flags, &filters, 6006 &s_idxattr, &s_prividx, 6007 extack); 6008 /* If we ran out of room on the first message, 6009 * we're in trouble 6010 */ 6011 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 6012 6013 if (err < 0) 6014 goto out; 6015 s_prividx = 0; 6016 s_idxattr = 0; 6017 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 6018 cont: 6019 idx++; 6020 } 6021 } 6022 out: 6023 cb->args[3] = s_prividx; 6024 cb->args[2] = s_idxattr; 
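/* save resume state for the next dump invocation */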
6025 cb->args[1] = idx; 6026 cb->args[0] = h; 6027 6028 return skb->len; 6029 } 6030 6031 void rtnl_offload_xstats_notify(struct net_device *dev) 6032 { 6033 struct rtnl_stats_dump_filters response_filters = {}; 6034 struct net *net = dev_net(dev); 6035 int idxattr = 0, prividx = 0; 6036 struct sk_buff *skb; 6037 int err = -ENOBUFS; 6038 6039 ASSERT_RTNL(); 6040 6041 response_filters.mask[0] |= 6042 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6043 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6044 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6045 6046 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6047 GFP_KERNEL); 6048 if (!skb) 6049 goto errout; 6050 6051 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6052 &response_filters, &idxattr, &prividx, NULL); 6053 if (err < 0) { 6054 kfree_skb(skb); 6055 goto errout; 6056 } 6057 6058 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6059 return; 6060 6061 errout: 6062 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6063 } 6064 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6065 6066 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6067 struct netlink_ext_ack *extack) 6068 { 6069 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6070 struct rtnl_stats_dump_filters response_filters = {}; 6071 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6072 struct net *net = sock_net(skb->sk); 6073 struct net_device *dev = NULL; 6074 struct if_stats_msg *ifsm; 6075 bool notify = false; 6076 int err; 6077 6078 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6079 false, extack); 6080 if (err) 6081 return err; 6082 6083 ifsm = nlmsg_data(nlh); 6084 if (ifsm->family != AF_UNSPEC) { 6085 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6086 return -EINVAL; 6087 } 6088 6089 if (ifsm->ifindex > 0) 6090 dev = __dev_get_by_index(net, ifsm->ifindex); 6091 else 6092 return -EINVAL; 6093 6094 if (!dev) 6095 return -ENODEV; 6096 6097 if (ifsm->filter_mask) { 6098 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6099 return -EINVAL; 6100 } 6101 6102 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6103 ifla_stats_set_policy, extack); 6104 if (err < 0) 6105 return err; 6106 6107 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6108 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6109 6110 if (req) 6111 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6112 else 6113 err = netdev_offload_xstats_disable(dev, t_l3); 6114 6115 if (!err) 6116 notify = true; 6117 else if (err != -EALREADY) 6118 return err; 6119 6120 response_filters.mask[0] |= 6121 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6122 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6123 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6124 } 6125 6126 if (notify) 6127 rtnl_offload_xstats_notify(dev); 6128 6129 return 0; 6130 } 6131 6132 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6133 struct netlink_ext_ack *extack) 6134 { 6135 struct br_port_msg *bpm; 6136 6137 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6138 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6139 return -EINVAL; 6140 } 6141 6142 bpm = nlmsg_data(nlh); 6143 if (bpm->ifindex) { 6144 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request"); 6145 return -EINVAL; 6146 } 6147 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6148 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump 
request"); 6149 return -EINVAL; 6150 } 6151 6152 return 0; 6153 } 6154 6155 struct rtnl_mdb_dump_ctx { 6156 long idx; 6157 }; 6158 6159 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6160 { 6161 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6162 struct net *net = sock_net(skb->sk); 6163 struct net_device *dev; 6164 int idx, s_idx; 6165 int err; 6166 6167 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6168 6169 if (cb->strict_check) { 6170 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6171 if (err) 6172 return err; 6173 } 6174 6175 s_idx = ctx->idx; 6176 idx = 0; 6177 6178 for_each_netdev(net, dev) { 6179 if (idx < s_idx) 6180 goto skip; 6181 if (!dev->netdev_ops->ndo_mdb_dump) 6182 goto skip; 6183 6184 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6185 if (err == -EMSGSIZE) 6186 goto out; 6187 /* Moving on to next device, reset markers and sequence 6188 * counters since they are all maintained per-device. 6189 */ 6190 memset(cb->ctx, 0, sizeof(cb->ctx)); 6191 cb->prev_seq = 0; 6192 cb->seq = 0; 6193 skip: 6194 idx++; 6195 } 6196 6197 out: 6198 ctx->idx = idx; 6199 return skb->len; 6200 } 6201 6202 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr, 6203 struct netlink_ext_ack *extack) 6204 { 6205 struct br_mdb_entry *entry = nla_data(attr); 6206 6207 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6208 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6209 return -EINVAL; 6210 } 6211 6212 if (entry->ifindex) { 6213 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified"); 6214 return -EINVAL; 6215 } 6216 6217 if (entry->state) { 6218 NL_SET_ERR_MSG(extack, "Entry state cannot be specified"); 6219 return -EINVAL; 6220 } 6221 6222 if (entry->flags) { 6223 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified"); 6224 return -EINVAL; 6225 } 6226 6227 if (entry->vid >= VLAN_VID_MASK) { 6228 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6229 return -EINVAL; 6230 } 6231 6232 if (entry->addr.proto != htons(ETH_P_IP) && 6233 entry->addr.proto != htons(ETH_P_IPV6) && 6234 entry->addr.proto != 0) { 6235 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6236 return -EINVAL; 6237 } 6238 6239 return 0; 6240 } 6241 6242 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = { 6243 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6244 rtnl_validate_mdb_entry_get, 6245 sizeof(struct br_mdb_entry)), 6246 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6247 }; 6248 6249 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 6250 struct netlink_ext_ack *extack) 6251 { 6252 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1]; 6253 struct net *net = sock_net(in_skb->sk); 6254 struct br_port_msg *bpm; 6255 struct net_device *dev; 6256 int err; 6257 6258 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb, 6259 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack); 6260 if (err) 6261 return err; 6262 6263 bpm = nlmsg_data(nlh); 6264 if (!bpm->ifindex) { 6265 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6266 return -EINVAL; 6267 } 6268 6269 dev = __dev_get_by_index(net, bpm->ifindex); 6270 if (!dev) { 6271 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6272 return -ENODEV; 6273 } 6274 6275 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) { 6276 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute"); 6277 return -EINVAL; 6278 } 6279 6280 if (!dev->netdev_ops->ndo_mdb_get) { 6281 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6282 return -EOPNOTSUPP; 6283 } 6284 6285 return 
static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
	struct net *net = sock_net(in_skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_get) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, extack);
}

static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}
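
/* For illustration only -- a hypothetical sketch, not part of the original
 * file: an RTM_NEWMDB entry that rtnl_validate_mdb_entry() accepts, adding a
 * permanent IPv4 group on bridge port ifindex 5 (both numbers arbitrary):
 *
 *	struct br_mdb_entry entry = {
 *		.ifindex = 5,			// target port, must be non-zero
 *		.state = MDB_PERMANENT,		// or MDB_TEMPORARY
 *		.vid = 10,			// must be < VLAN_VID_MASK
 *		.addr.proto = htons(ETH_P_IP),
 *		.addr.u.ip4 = htonl(0xe1010101),	// 225.1.1.1, non-local multicast
 *	};
 *
 * It is carried in an MDBA_SET_ENTRY attribute following the br_port_msg
 * header, whose ifindex names the bridge device itself.
 */
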
static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
					    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);
	struct br_mdb_entry zero_entry = {};

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_N_VID - 1) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}
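
/* For illustration only -- a hypothetical sketch, not part of the original
 * file: with NLM_F_BULK set, the br_mdb_entry carried in MDBA_SET_ENTRY
 * describes which entries to flush rather than naming a single group, which
 * is why rtnl_validate_mdb_entry_del_bulk() insists the address stays zeroed.
 * E.g. a filter for temporary entries on VLAN 10:
 *
 *	struct br_mdb_entry filter = {
 *		.state = MDB_TEMPORARY,
 *		.vid = 10,
 *		// .addr left zeroed: selecting by group address is rejected
 *	};
 *
 * Any finer-grained criteria travel in the MDBA_SET_ENTRY_ATTRS nest; how
 * they are matched is up to the device's ndo_mdb_del_bulk() implementation.
 */
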
/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;
		flags = link->flags;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
				.flags = flags,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * the module if the dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
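
/* For illustration only, not part of the original file: the dispatch above
 * keys handlers on (rtgen_family, msgtype - RTM_BASE) and derives the
 * operation kind from the two low bits of the message type. For an
 * RTM_DELNEIGH request whose first payload byte is AF_BRIDGE:
 *
 *	type = RTM_DELNEIGH - RTM_BASE;	// per-family table index
 *	kind = rtnl_msgtype_kind(type);	// RTNL_KIND_DEL
 *	// rtnl_get_link(AF_BRIDGE, type) yields rtnl_fdb_del(), which is
 *	// registered with RTNL_FLAG_BULK_DEL_SUPPORTED, so NLM_F_BULK
 *	// requests pass the bulk-delete check above.
 */
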
static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
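
/* For illustration only -- a hypothetical userspace sketch, not part of the
 * original file: rtnetlink_event() above turns device events into RTM_NEWLINK
 * multicasts, which a monitor receives by joining the link group on a
 * NETLINK_ROUTE socket:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK,	// legacy bitmask for RTNLGRP_LINK
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// recv() now yields RTM_NEWLINK for e.g. NETDEV_CHANGEMTU; groups
 *	// above 31 must be joined via setsockopt(NETLINK_ADD_MEMBERSHIP).
 *
 * NL_CFG_F_NONROOT_RECV in rtnetlink_net_init() is what lets unprivileged
 * processes listen this way; rtnetlink_bind() carves out the two multicast
 * routing groups as the exception.
 */
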
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}
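
/* For illustration only, not part of the original file: each rtnl_register()
 * call above fills one (family, msgtype) slot -- the doit argument serves
 * single-shot requests and the dumpit argument serves NLM_F_DUMP requests.
 * The RTM_GETMDB slot, for instance, routes a plain get to rtnl_mdb_get()
 * and a dump to rtnl_mdb_dump(); PF_UNSPEC slots additionally act as the
 * fallback that rtnl_get_link() consults when a family has no handler of
 * its own.
 */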