// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 * netdev_run_todo()
	 *   __rtnl_unlock()
	 *
	 *   // list not empty now
	 *   // because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *     wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}
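/*
 * Illustrative sketch (not part of the original file): how a caller might
 * batch skb frees under RTNL. rtnl_kfree_skbs() only queues the chain; the
 * deferred kfree_skb() calls run in __rtnl_unlock() after the mutex is
 * dropped, so the free work does not extend the locked section. The
 * function and the queue it detaches from are hypothetical.
 */
#if 0	/* example only */
static void example_flush_queue(struct sk_buff *head, struct sk_buff *tail)
{
	rtnl_lock();
	/* ... detach head..tail from some device queue under RTNL ... */
	rtnl_kfree_skbs(head, tail);	/* queued, not freed yet */
	rtnl_unlock();			/* frees happen after unlock */
}
#endif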
void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
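/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * rtnl_register_module() in module init with rtnl_unregister() on exit.
 * RTM_GETEXAMPLE and example_dumpit are hypothetical; a real module would
 * register a message type defined in linux/rtnetlink.h for its family.
 */
#if 0	/* example only */
static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;	/* nothing to dump in this sketch */
}

static int __init example_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETEXAMPLE,
				    NULL, example_dumpit, 0);
}

static void __exit example_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETEXAMPLE);
}
#endif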
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
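/*
 * Illustrative sketch (not part of the original file): a minimal
 * rtnl_link_ops. With .setup filled in, __rtnl_link_register() above
 * defaults .dellink to unregister_netdevice_queue, so "ip link del" works
 * without extra code. The "example" kind and example_setup are assumptions.
 */
#if 0	/* example only */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* hypothetical: an Ethernet-like device */
}

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind	= "example",
	.setup	= example_setup,
};

/* typically paired with rtnl_link_unregister() in module exit */
static int __init example_link_init(void)
{
	return rtnl_link_register(&example_link_ops);
}
#endif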
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
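/*
 * Illustrative sketch (not part of the original file): a bare rtnl_af_ops.
 * Only the callbacks actually provided are invoked by the fill/size helpers
 * below, so a family can start with little more than .family set.
 * example_fill_link_af is an assumption; returning -ENODATA tells
 * rtnl_fill_link_af() to trim the empty nest rather than fail the dump.
 */
#if 0	/* example only */
static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask)
{
	return -ENODATA;	/* nothing to report for this device */
}

static struct rtnl_af_ops example_af_ops __read_mostly = {
	.family		= AF_MAX,	/* hypothetical family number */
	.fill_link_af	= example_fill_link_af,
};
#endif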
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
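/*
 * Illustrative sketch (not part of the original file): how a route dump
 * might use the two helpers around this point. The metrics array, dst and
 * the surrounding function are placeholders supplied by the caller's
 * rtable/fib entry in real code.
 */
#if 0	/* example only */
static int example_fill_route(struct sk_buff *skb, struct dst_entry *dst,
			      u32 *metrics, u32 id, long expires, u32 error)
{
	if (rtnetlink_put_metrics(skb, metrics) < 0)	/* RTA_METRICS nest */
		return -EMSGSIZE;
	/* RTA_CACHEINFO: lastuse/refcnt from dst, relative expiry in jiffies */
	return rtnl_put_cacheinfo(skb, dst, id, expires, error);
}
#endif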
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id =  id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */

	size += dpll_msg_pin_handle_size(netdev_dpll_pin(dev));

	return size;
}
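/* The rtnl_*_size() helpers above are paired with the fill functions below:
 * each one upper-bounds the attribute space its counterpart may consume, so
 * the message buffer sized via if_nlmsg_size() cannot run out mid-fill for
 * the attributes accounted here. A new IFLA_* attribute therefore needs
 * both a nla_total_size() term here and the matching nla_put() in the fill
 * path.
 */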
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + rtnl_dpll_pin_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       u32 ext_filter_mask)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		return -EMSGSIZE;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
		memset(&vf_stats, 0, sizeof(vf_stats));
		if (dev->netdev_ops->ndo_get_vf_stats)
			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
							  &vf_stats);
		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
		if (!vfstats)
			goto nla_put_vf_failure;
		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
			nla_nest_cancel(skb, vfstats);
			goto nla_put_vf_failure;
		}
		nla_nest_end(skb, vfstats);
	}
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
			nla_nest_cancel(skb, vfinfo);
			return -EMSGSIZE;
		}
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}
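/* rtnl_xdp_fill() below emits one IFLA_XDP_<mode>_PROG_ID attribute per
 * attached program via rtnl_xdp_report_one(); "mode" collapses to
 * XDP_ATTACHED_MULTI as soon as a second mode is seen, in which case the
 * legacy IFLA_XDP_PROG_ID attribute is deliberately omitted because no
 * single program id would be accurate.
 */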
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}
static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_dpll_pin(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct nlattr *dpll_pin_nest;
	int ret;

	dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
	if (!dpll_pin_nest)
		return -EMSGSIZE;

	ret = dpll_msg_add_pin_handle(skb, netdev_dpll_pin(dev));
	if (ret < 0)
		goto nest_cancel;

	nla_nest_end(skb, dpll_pin_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, dpll_pin_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_dpll_pin(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
	[IFLA_GSO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
[IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 2013 }; 2014 2015 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 2016 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 2017 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 2018 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 2019 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 2020 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 2021 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 2022 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 2023 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 2024 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 2025 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2026 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2027 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2028 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2029 }; 2030 2031 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2032 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2033 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2034 .len = PORT_PROFILE_MAX }, 2035 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2036 .len = PORT_UUID_MAX }, 2037 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2038 .len = PORT_UUID_MAX }, 2039 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2040 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2041 2042 /* Unused, but we need to keep it here since user space could 2043 * fill it. It's also broken with regard to NLA_BINARY use in 2044 * combination with structs. 2045 */ 2046 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2047 .len = sizeof(struct ifla_port_vsi) }, 2048 }; 2049 2050 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2051 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2052 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2053 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2054 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2055 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2056 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2057 }; 2058 2059 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2060 { 2061 const struct rtnl_link_ops *ops = NULL; 2062 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2063 2064 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2065 return NULL; 2066 2067 if (linfo[IFLA_INFO_KIND]) { 2068 char kind[MODULE_NAME_LEN]; 2069 2070 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2071 ops = rtnl_link_ops_get(kind); 2072 } 2073 2074 return ops; 2075 } 2076 2077 static bool link_master_filtered(struct net_device *dev, int master_idx) 2078 { 2079 struct net_device *master; 2080 2081 if (!master_idx) 2082 return false; 2083 2084 master = netdev_master_upper_dev_get(dev); 2085 2086 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2087 * another invalid value for ifindex to denote "no master". 
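	 *
	 * To summarize the checks below:
	 *	master_idx == 0		do not filter on master at all
	 *	master_idx == -1	keep only devices that have no master
	 *	master_idx  > 0		keep only devices whose master has
	 *				that ifindex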
2088 */ 2089 if (master_idx == -1) 2090 return !!master; 2091 2092 if (!master || master->ifindex != master_idx) 2093 return true; 2094 2095 return false; 2096 } 2097 2098 static bool link_kind_filtered(const struct net_device *dev, 2099 const struct rtnl_link_ops *kind_ops) 2100 { 2101 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2102 return true; 2103 2104 return false; 2105 } 2106 2107 static bool link_dump_filtered(struct net_device *dev, 2108 int master_idx, 2109 const struct rtnl_link_ops *kind_ops) 2110 { 2111 if (link_master_filtered(dev, master_idx) || 2112 link_kind_filtered(dev, kind_ops)) 2113 return true; 2114 2115 return false; 2116 } 2117 2118 /** 2119 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2120 * @sk: netlink socket 2121 * @netnsid: network namespace identifier 2122 * 2123 * Returns the network namespace identified by netnsid on success or an error 2124 * pointer on failure. 2125 */ 2126 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2127 { 2128 struct net *net; 2129 2130 net = get_net_ns_by_id(sock_net(sk), netnsid); 2131 if (!net) 2132 return ERR_PTR(-EINVAL); 2133 2134 /* For now, the caller is required to have CAP_NET_ADMIN in 2135 * the user namespace owning the target net ns. 2136 */ 2137 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2138 put_net(net); 2139 return ERR_PTR(-EACCES); 2140 } 2141 return net; 2142 } 2143 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2144 2145 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2146 bool strict_check, struct nlattr **tb, 2147 struct netlink_ext_ack *extack) 2148 { 2149 int hdrlen; 2150 2151 if (strict_check) { 2152 struct ifinfomsg *ifm; 2153 2154 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2155 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2156 return -EINVAL; 2157 } 2158 2159 ifm = nlmsg_data(nlh); 2160 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2161 ifm->ifi_change) { 2162 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2163 return -EINVAL; 2164 } 2165 if (ifm->ifi_index) { 2166 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2167 return -EINVAL; 2168 } 2169 2170 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2171 IFLA_MAX, ifla_policy, 2172 extack); 2173 } 2174 2175 /* A hack to preserve kernel<->userspace interface. 2176 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2177 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2178 * what iproute2 < v3.9.0 used. 2179 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2180 * attribute, its netlink message is shorter than struct ifinfomsg. 2181 */ 2182 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 
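		 /* struct rtgenmsg carries only a single family byte, so a
		  * header shorter than struct ifinfomsg can only be the
		  * legacy rtgenmsg layout described above */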
2183 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2184 2185 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2186 extack); 2187 } 2188 2189 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2190 { 2191 const struct rtnl_link_ops *kind_ops = NULL; 2192 struct netlink_ext_ack *extack = cb->extack; 2193 const struct nlmsghdr *nlh = cb->nlh; 2194 struct net *net = sock_net(skb->sk); 2195 unsigned int flags = NLM_F_MULTI; 2196 struct nlattr *tb[IFLA_MAX+1]; 2197 struct { 2198 unsigned long ifindex; 2199 } *ctx = (void *)cb->ctx; 2200 struct net *tgt_net = net; 2201 u32 ext_filter_mask = 0; 2202 struct net_device *dev; 2203 int master_idx = 0; 2204 int netnsid = -1; 2205 int err, i; 2206 2207 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2208 if (err < 0) { 2209 if (cb->strict_check) 2210 return err; 2211 2212 goto walk_entries; 2213 } 2214 2215 for (i = 0; i <= IFLA_MAX; ++i) { 2216 if (!tb[i]) 2217 continue; 2218 2219 /* new attributes should only be added with strict checking */ 2220 switch (i) { 2221 case IFLA_TARGET_NETNSID: 2222 netnsid = nla_get_s32(tb[i]); 2223 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2224 if (IS_ERR(tgt_net)) { 2225 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2226 return PTR_ERR(tgt_net); 2227 } 2228 break; 2229 case IFLA_EXT_MASK: 2230 ext_filter_mask = nla_get_u32(tb[i]); 2231 break; 2232 case IFLA_MASTER: 2233 master_idx = nla_get_u32(tb[i]); 2234 break; 2235 case IFLA_LINKINFO: 2236 kind_ops = linkinfo_to_kind_ops(tb[i]); 2237 break; 2238 default: 2239 if (cb->strict_check) { 2240 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2241 return -EINVAL; 2242 } 2243 } 2244 } 2245 2246 if (master_idx || kind_ops) 2247 flags |= NLM_F_DUMP_FILTERED; 2248 2249 walk_entries: 2250 err = 0; 2251 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { 2252 if (link_dump_filtered(dev, master_idx, kind_ops)) 2253 continue; 2254 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK, 2255 NETLINK_CB(cb->skb).portid, 2256 nlh->nlmsg_seq, 0, flags, 2257 ext_filter_mask, 0, NULL, 0, 2258 netnsid, GFP_KERNEL); 2259 if (err < 0) { 2260 if (likely(skb->len)) 2261 err = skb->len; 2262 break; 2263 } 2264 } 2265 cb->seq = tgt_net->dev_base_seq; 2266 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2267 if (netnsid >= 0) 2268 put_net(tgt_net); 2269 2270 return err; 2271 } 2272 2273 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 2274 struct netlink_ext_ack *exterr) 2275 { 2276 const struct ifinfomsg *ifmp; 2277 const struct nlattr *attrs; 2278 size_t len; 2279 2280 ifmp = nla_data(nla_peer); 2281 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); 2282 len = nla_len(nla_peer) - sizeof(struct ifinfomsg); 2283 2284 if (ifmp->ifi_index < 0) { 2285 NL_SET_ERR_MSG_ATTR(exterr, nla_peer, 2286 "ifindex can't be negative"); 2287 return -EINVAL; 2288 } 2289 2290 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, 2291 exterr); 2292 } 2293 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); 2294 2295 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2296 { 2297 struct net *net; 2298 /* Examine the link attributes and figure out which 2299 * network namespace we are talking about. 
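	 *
	 * IFLA_NET_NS_PID takes precedence over IFLA_NET_NS_FD; with
	 * neither attribute present the source namespace is reused. The
	 * result holds a reference (or is an ERR_PTR), so the expected
	 * calling pattern is, as a minimal sketch:
	 *
	 *	net = rtnl_link_get_net(src_net, tb);
	 *	if (IS_ERR(net))
	 *		return PTR_ERR(net);
	 *	...
	 *	put_net(net);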
2300 */ 2301 if (tb[IFLA_NET_NS_PID]) 2302 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2303 else if (tb[IFLA_NET_NS_FD]) 2304 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2305 else 2306 net = get_net(src_net); 2307 return net; 2308 } 2309 EXPORT_SYMBOL(rtnl_link_get_net); 2310 2311 /* Figure out which network namespace we are talking about by 2312 * examining the link attributes in the following order: 2313 * 2314 * 1. IFLA_NET_NS_PID 2315 * 2. IFLA_NET_NS_FD 2316 * 3. IFLA_TARGET_NETNSID 2317 */ 2318 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2319 struct nlattr *tb[]) 2320 { 2321 struct net *net; 2322 2323 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2324 return rtnl_link_get_net(src_net, tb); 2325 2326 if (!tb[IFLA_TARGET_NETNSID]) 2327 return get_net(src_net); 2328 2329 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2330 if (!net) 2331 return ERR_PTR(-EINVAL); 2332 2333 return net; 2334 } 2335 2336 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2337 struct net *src_net, 2338 struct nlattr *tb[], int cap) 2339 { 2340 struct net *net; 2341 2342 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2343 if (IS_ERR(net)) 2344 return net; 2345 2346 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2347 put_net(net); 2348 return ERR_PTR(-EPERM); 2349 } 2350 2351 return net; 2352 } 2353 2354 /* Verify that rtnetlink requests do not pass additional properties 2355 * potentially referring to different network namespaces. 2356 */ 2357 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2358 struct netlink_ext_ack *extack, 2359 bool netns_id_only) 2360 { 2361 2362 if (netns_id_only) { 2363 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2364 return 0; 2365 2366 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2367 return -EOPNOTSUPP; 2368 } 2369 2370 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2371 goto invalid_attr; 2372 2373 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2374 goto invalid_attr; 2375 2376 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2377 goto invalid_attr; 2378 2379 return 0; 2380 2381 invalid_attr: 2382 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2383 return -EINVAL; 2384 } 2385 2386 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2387 int max_tx_rate) 2388 { 2389 const struct net_device_ops *ops = dev->netdev_ops; 2390 2391 if (!ops->ndo_set_vf_rate) 2392 return -EOPNOTSUPP; 2393 if (max_tx_rate && max_tx_rate < min_tx_rate) 2394 return -EINVAL; 2395 2396 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2397 } 2398 2399 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2400 struct netlink_ext_ack *extack) 2401 { 2402 if (tb[IFLA_ADDRESS] && 2403 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2404 return -EINVAL; 2405 2406 if (tb[IFLA_BROADCAST] && 2407 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2408 return -EINVAL; 2409 2410 if (tb[IFLA_GSO_MAX_SIZE] && 2411 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { 2412 NL_SET_ERR_MSG(extack, "too big gso_max_size"); 2413 return -EINVAL; 2414 } 2415 2416 if (tb[IFLA_GSO_MAX_SEGS] && 2417 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || 2418 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { 2419 NL_SET_ERR_MSG(extack, "too big gso_max_segs"); 2420 return -EINVAL; 2421 } 2422 2423 if (tb[IFLA_GRO_MAX_SIZE] && 
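	    /* unlike the GSO checks above, which are bounded by the device's
	     * TSO limits, the GRO sizes are bounded by the global
	     * GRO_MAX_SIZE */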
2424 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { 2425 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2426 return -EINVAL; 2427 } 2428 2429 if (tb[IFLA_GSO_IPV4_MAX_SIZE] && 2430 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { 2431 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); 2432 return -EINVAL; 2433 } 2434 2435 if (tb[IFLA_GRO_IPV4_MAX_SIZE] && 2436 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { 2437 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); 2438 return -EINVAL; 2439 } 2440 2441 if (tb[IFLA_AF_SPEC]) { 2442 struct nlattr *af; 2443 int rem, err; 2444 2445 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2446 const struct rtnl_af_ops *af_ops; 2447 2448 af_ops = rtnl_af_lookup(nla_type(af)); 2449 if (!af_ops) 2450 return -EAFNOSUPPORT; 2451 2452 if (!af_ops->set_link_af) 2453 return -EOPNOTSUPP; 2454 2455 if (af_ops->validate_link_af) { 2456 err = af_ops->validate_link_af(dev, af, extack); 2457 if (err < 0) 2458 return err; 2459 } 2460 } 2461 } 2462 2463 return 0; 2464 } 2465 2466 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2467 int guid_type) 2468 { 2469 const struct net_device_ops *ops = dev->netdev_ops; 2470 2471 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2472 } 2473 2474 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2475 { 2476 if (dev->type != ARPHRD_INFINIBAND) 2477 return -EOPNOTSUPP; 2478 2479 return handle_infiniband_guid(dev, ivt, guid_type); 2480 } 2481 2482 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2483 { 2484 const struct net_device_ops *ops = dev->netdev_ops; 2485 int err = -EINVAL; 2486 2487 if (tb[IFLA_VF_MAC]) { 2488 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2489 2490 if (ivm->vf >= INT_MAX) 2491 return -EINVAL; 2492 err = -EOPNOTSUPP; 2493 if (ops->ndo_set_vf_mac) 2494 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2495 ivm->mac); 2496 if (err < 0) 2497 return err; 2498 } 2499 2500 if (tb[IFLA_VF_VLAN]) { 2501 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2502 2503 if (ivv->vf >= INT_MAX) 2504 return -EINVAL; 2505 err = -EOPNOTSUPP; 2506 if (ops->ndo_set_vf_vlan) 2507 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2508 ivv->qos, 2509 htons(ETH_P_8021Q)); 2510 if (err < 0) 2511 return err; 2512 } 2513 2514 if (tb[IFLA_VF_VLAN_LIST]) { 2515 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2516 struct nlattr *attr; 2517 int rem, len = 0; 2518 2519 err = -EOPNOTSUPP; 2520 if (!ops->ndo_set_vf_vlan) 2521 return err; 2522 2523 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2524 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2525 nla_len(attr) < NLA_HDRLEN) { 2526 return -EINVAL; 2527 } 2528 if (len >= MAX_VLAN_LIST_LEN) 2529 return -EOPNOTSUPP; 2530 ivvl[len] = nla_data(attr); 2531 2532 len++; 2533 } 2534 if (len == 0) 2535 return -EINVAL; 2536 2537 if (ivvl[0]->vf >= INT_MAX) 2538 return -EINVAL; 2539 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2540 ivvl[0]->qos, ivvl[0]->vlan_proto); 2541 if (err < 0) 2542 return err; 2543 } 2544 2545 if (tb[IFLA_VF_TX_RATE]) { 2546 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2547 struct ifla_vf_info ivf; 2548 2549 if (ivt->vf >= INT_MAX) 2550 return -EINVAL; 2551 err = -EOPNOTSUPP; 2552 if (ops->ndo_get_vf_config) 2553 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2554 if (err < 0) 2555 return err; 2556 2557 err = rtnl_set_vf_rate(dev, ivt->vf, 2558 ivf.min_tx_rate, ivt->rate); 2559 if (err < 0) 
2560 return err; 2561 } 2562 2563 if (tb[IFLA_VF_RATE]) { 2564 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2565 2566 if (ivt->vf >= INT_MAX) 2567 return -EINVAL; 2568 2569 err = rtnl_set_vf_rate(dev, ivt->vf, 2570 ivt->min_tx_rate, ivt->max_tx_rate); 2571 if (err < 0) 2572 return err; 2573 } 2574 2575 if (tb[IFLA_VF_SPOOFCHK]) { 2576 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2577 2578 if (ivs->vf >= INT_MAX) 2579 return -EINVAL; 2580 err = -EOPNOTSUPP; 2581 if (ops->ndo_set_vf_spoofchk) 2582 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2583 ivs->setting); 2584 if (err < 0) 2585 return err; 2586 } 2587 2588 if (tb[IFLA_VF_LINK_STATE]) { 2589 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2590 2591 if (ivl->vf >= INT_MAX) 2592 return -EINVAL; 2593 err = -EOPNOTSUPP; 2594 if (ops->ndo_set_vf_link_state) 2595 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2596 ivl->link_state); 2597 if (err < 0) 2598 return err; 2599 } 2600 2601 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2602 struct ifla_vf_rss_query_en *ivrssq_en; 2603 2604 err = -EOPNOTSUPP; 2605 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2606 if (ivrssq_en->vf >= INT_MAX) 2607 return -EINVAL; 2608 if (ops->ndo_set_vf_rss_query_en) 2609 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2610 ivrssq_en->setting); 2611 if (err < 0) 2612 return err; 2613 } 2614 2615 if (tb[IFLA_VF_TRUST]) { 2616 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2617 2618 if (ivt->vf >= INT_MAX) 2619 return -EINVAL; 2620 err = -EOPNOTSUPP; 2621 if (ops->ndo_set_vf_trust) 2622 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2623 if (err < 0) 2624 return err; 2625 } 2626 2627 if (tb[IFLA_VF_IB_NODE_GUID]) { 2628 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2629 2630 if (ivt->vf >= INT_MAX) 2631 return -EINVAL; 2632 if (!ops->ndo_set_vf_guid) 2633 return -EOPNOTSUPP; 2634 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2635 } 2636 2637 if (tb[IFLA_VF_IB_PORT_GUID]) { 2638 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2639 2640 if (ivt->vf >= INT_MAX) 2641 return -EINVAL; 2642 if (!ops->ndo_set_vf_guid) 2643 return -EOPNOTSUPP; 2644 2645 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2646 } 2647 2648 return err; 2649 } 2650 2651 static int do_set_master(struct net_device *dev, int ifindex, 2652 struct netlink_ext_ack *extack) 2653 { 2654 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2655 const struct net_device_ops *ops; 2656 int err; 2657 2658 if (upper_dev) { 2659 if (upper_dev->ifindex == ifindex) 2660 return 0; 2661 ops = upper_dev->netdev_ops; 2662 if (ops->ndo_del_slave) { 2663 err = ops->ndo_del_slave(upper_dev, dev); 2664 if (err) 2665 return err; 2666 } else { 2667 return -EOPNOTSUPP; 2668 } 2669 } 2670 2671 if (ifindex) { 2672 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2673 if (!upper_dev) 2674 return -EINVAL; 2675 ops = upper_dev->netdev_ops; 2676 if (ops->ndo_add_slave) { 2677 err = ops->ndo_add_slave(upper_dev, dev, extack); 2678 if (err) 2679 return err; 2680 } else { 2681 return -EOPNOTSUPP; 2682 } 2683 } 2684 return 0; 2685 } 2686 2687 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2688 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2689 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2690 }; 2691 2692 static int do_set_proto_down(struct net_device *dev, 2693 struct nlattr *nl_proto_down, 2694 struct nlattr *nl_proto_down_reason, 2695 struct 
netlink_ext_ack *extack) 2696 { 2697 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2698 unsigned long mask = 0; 2699 u32 value; 2700 bool proto_down; 2701 int err; 2702 2703 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2704 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2705 return -EOPNOTSUPP; 2706 } 2707 2708 if (nl_proto_down_reason) { 2709 err = nla_parse_nested_deprecated(pdreason, 2710 IFLA_PROTO_DOWN_REASON_MAX, 2711 nl_proto_down_reason, 2712 ifla_proto_down_reason_policy, 2713 NULL); 2714 if (err < 0) 2715 return err; 2716 2717 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2718 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2719 return -EINVAL; 2720 } 2721 2722 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2723 2724 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2725 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2726 2727 dev_change_proto_down_reason(dev, mask, value); 2728 } 2729 2730 if (nl_proto_down) { 2731 proto_down = nla_get_u8(nl_proto_down); 2732 2733 /* Don't turn off protodown if there are active reasons */ 2734 if (!proto_down && dev->proto_down_reason) { 2735 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2736 return -EBUSY; 2737 } 2738 err = dev_change_proto_down(dev, 2739 proto_down); 2740 if (err) 2741 return err; 2742 } 2743 2744 return 0; 2745 } 2746 2747 #define DO_SETLINK_MODIFIED 0x01 2748 /* notify flag means notify + modified. */ 2749 #define DO_SETLINK_NOTIFY 0x03 2750 static int do_setlink(const struct sk_buff *skb, 2751 struct net_device *dev, struct ifinfomsg *ifm, 2752 struct netlink_ext_ack *extack, 2753 struct nlattr **tb, int status) 2754 { 2755 const struct net_device_ops *ops = dev->netdev_ops; 2756 char ifname[IFNAMSIZ]; 2757 int err; 2758 2759 if (tb[IFLA_IFNAME]) 2760 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2761 else 2762 ifname[0] = '\0'; 2763 2764 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2765 const char *pat = ifname[0] ? 
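				/* pat is handed to
				 * __dev_change_net_namespace() below as a
				 * rename pattern, used when dev's current
				 * name is already taken in the target
				 * namespace */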
ifname : NULL; 2766 struct net *net; 2767 int new_ifindex; 2768 2769 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2770 tb, CAP_NET_ADMIN); 2771 if (IS_ERR(net)) { 2772 err = PTR_ERR(net); 2773 goto errout; 2774 } 2775 2776 if (tb[IFLA_NEW_IFINDEX]) 2777 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2778 else 2779 new_ifindex = 0; 2780 2781 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2782 put_net(net); 2783 if (err) 2784 goto errout; 2785 status |= DO_SETLINK_MODIFIED; 2786 } 2787 2788 if (tb[IFLA_MAP]) { 2789 struct rtnl_link_ifmap *u_map; 2790 struct ifmap k_map; 2791 2792 if (!ops->ndo_set_config) { 2793 err = -EOPNOTSUPP; 2794 goto errout; 2795 } 2796 2797 if (!netif_device_present(dev)) { 2798 err = -ENODEV; 2799 goto errout; 2800 } 2801 2802 u_map = nla_data(tb[IFLA_MAP]); 2803 k_map.mem_start = (unsigned long) u_map->mem_start; 2804 k_map.mem_end = (unsigned long) u_map->mem_end; 2805 k_map.base_addr = (unsigned short) u_map->base_addr; 2806 k_map.irq = (unsigned char) u_map->irq; 2807 k_map.dma = (unsigned char) u_map->dma; 2808 k_map.port = (unsigned char) u_map->port; 2809 2810 err = ops->ndo_set_config(dev, &k_map); 2811 if (err < 0) 2812 goto errout; 2813 2814 status |= DO_SETLINK_NOTIFY; 2815 } 2816 2817 if (tb[IFLA_ADDRESS]) { 2818 struct sockaddr *sa; 2819 int len; 2820 2821 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2822 sizeof(*sa)); 2823 sa = kmalloc(len, GFP_KERNEL); 2824 if (!sa) { 2825 err = -ENOMEM; 2826 goto errout; 2827 } 2828 sa->sa_family = dev->type; 2829 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2830 dev->addr_len); 2831 err = dev_set_mac_address_user(dev, sa, extack); 2832 kfree(sa); 2833 if (err) 2834 goto errout; 2835 status |= DO_SETLINK_MODIFIED; 2836 } 2837 2838 if (tb[IFLA_MTU]) { 2839 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2840 if (err < 0) 2841 goto errout; 2842 status |= DO_SETLINK_MODIFIED; 2843 } 2844 2845 if (tb[IFLA_GROUP]) { 2846 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2847 status |= DO_SETLINK_NOTIFY; 2848 } 2849 2850 /* 2851 * Interface selected by interface index but interface 2852 * name provided implies that a name change has been 2853 * requested. 
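	 *
	 * E.g. a (hypothetical) RTM_SETLINK request with ifi_index == 3
	 * and IFLA_IFNAME == "lan0" renames interface 3 to "lan0" rather
	 * than selecting the device by name.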
2854 */ 2855 if (ifm->ifi_index > 0 && ifname[0]) { 2856 err = dev_change_name(dev, ifname); 2857 if (err < 0) 2858 goto errout; 2859 status |= DO_SETLINK_MODIFIED; 2860 } 2861 2862 if (tb[IFLA_IFALIAS]) { 2863 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2864 nla_len(tb[IFLA_IFALIAS])); 2865 if (err < 0) 2866 goto errout; 2867 status |= DO_SETLINK_NOTIFY; 2868 } 2869 2870 if (tb[IFLA_BROADCAST]) { 2871 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2872 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2873 } 2874 2875 if (ifm->ifi_flags || ifm->ifi_change) { 2876 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2877 extack); 2878 if (err < 0) 2879 goto errout; 2880 } 2881 2882 if (tb[IFLA_MASTER]) { 2883 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2884 if (err) 2885 goto errout; 2886 status |= DO_SETLINK_MODIFIED; 2887 } 2888 2889 if (tb[IFLA_CARRIER]) { 2890 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2891 if (err) 2892 goto errout; 2893 status |= DO_SETLINK_MODIFIED; 2894 } 2895 2896 if (tb[IFLA_TXQLEN]) { 2897 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2898 2899 err = dev_change_tx_queue_len(dev, value); 2900 if (err) 2901 goto errout; 2902 status |= DO_SETLINK_MODIFIED; 2903 } 2904 2905 if (tb[IFLA_GSO_MAX_SIZE]) { 2906 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2907 2908 if (dev->gso_max_size ^ max_size) { 2909 netif_set_gso_max_size(dev, max_size); 2910 status |= DO_SETLINK_MODIFIED; 2911 } 2912 } 2913 2914 if (tb[IFLA_GSO_MAX_SEGS]) { 2915 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2916 2917 if (dev->gso_max_segs ^ max_segs) { 2918 netif_set_gso_max_segs(dev, max_segs); 2919 status |= DO_SETLINK_MODIFIED; 2920 } 2921 } 2922 2923 if (tb[IFLA_GRO_MAX_SIZE]) { 2924 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2925 2926 if (dev->gro_max_size ^ gro_max_size) { 2927 netif_set_gro_max_size(dev, gro_max_size); 2928 status |= DO_SETLINK_MODIFIED; 2929 } 2930 } 2931 2932 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 2933 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 2934 2935 if (dev->gso_ipv4_max_size ^ max_size) { 2936 netif_set_gso_ipv4_max_size(dev, max_size); 2937 status |= DO_SETLINK_MODIFIED; 2938 } 2939 } 2940 2941 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 2942 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 2943 2944 if (dev->gro_ipv4_max_size ^ gro_max_size) { 2945 netif_set_gro_ipv4_max_size(dev, gro_max_size); 2946 status |= DO_SETLINK_MODIFIED; 2947 } 2948 } 2949 2950 if (tb[IFLA_OPERSTATE]) 2951 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2952 2953 if (tb[IFLA_LINKMODE]) { 2954 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2955 2956 write_lock(&dev_base_lock); 2957 if (dev->link_mode ^ value) 2958 status |= DO_SETLINK_NOTIFY; 2959 dev->link_mode = value; 2960 write_unlock(&dev_base_lock); 2961 } 2962 2963 if (tb[IFLA_VFINFO_LIST]) { 2964 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2965 struct nlattr *attr; 2966 int rem; 2967 2968 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2969 if (nla_type(attr) != IFLA_VF_INFO || 2970 nla_len(attr) < NLA_HDRLEN) { 2971 err = -EINVAL; 2972 goto errout; 2973 } 2974 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2975 attr, 2976 ifla_vf_policy, 2977 NULL); 2978 if (err < 0) 2979 goto errout; 2980 err = do_setvfinfo(dev, vfinfo); 2981 if (err < 0) 2982 goto errout; 2983 status |= DO_SETLINK_NOTIFY; 2984 } 2985 } 2986 err = 0; 2987 2988 if (tb[IFLA_VF_PORTS]) { 2989 struct nlattr *port[IFLA_PORT_MAX+1]; 2990 
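		/* IFLA_VF_PORTS carries a list of IFLA_VF_PORT nests; each
		 * nest is validated against ifla_port_policy and forwarded
		 * to ndo_set_vf_port() below */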
struct nlattr *attr; 2991 int vf; 2992 int rem; 2993 2994 err = -EOPNOTSUPP; 2995 if (!ops->ndo_set_vf_port) 2996 goto errout; 2997 2998 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2999 if (nla_type(attr) != IFLA_VF_PORT || 3000 nla_len(attr) < NLA_HDRLEN) { 3001 err = -EINVAL; 3002 goto errout; 3003 } 3004 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3005 attr, 3006 ifla_port_policy, 3007 NULL); 3008 if (err < 0) 3009 goto errout; 3010 if (!port[IFLA_PORT_VF]) { 3011 err = -EOPNOTSUPP; 3012 goto errout; 3013 } 3014 vf = nla_get_u32(port[IFLA_PORT_VF]); 3015 err = ops->ndo_set_vf_port(dev, vf, port); 3016 if (err < 0) 3017 goto errout; 3018 status |= DO_SETLINK_NOTIFY; 3019 } 3020 } 3021 err = 0; 3022 3023 if (tb[IFLA_PORT_SELF]) { 3024 struct nlattr *port[IFLA_PORT_MAX+1]; 3025 3026 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3027 tb[IFLA_PORT_SELF], 3028 ifla_port_policy, NULL); 3029 if (err < 0) 3030 goto errout; 3031 3032 err = -EOPNOTSUPP; 3033 if (ops->ndo_set_vf_port) 3034 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3035 if (err < 0) 3036 goto errout; 3037 status |= DO_SETLINK_NOTIFY; 3038 } 3039 3040 if (tb[IFLA_AF_SPEC]) { 3041 struct nlattr *af; 3042 int rem; 3043 3044 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3045 const struct rtnl_af_ops *af_ops; 3046 3047 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3048 3049 err = af_ops->set_link_af(dev, af, extack); 3050 if (err < 0) 3051 goto errout; 3052 3053 status |= DO_SETLINK_NOTIFY; 3054 } 3055 } 3056 err = 0; 3057 3058 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3059 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3060 tb[IFLA_PROTO_DOWN_REASON], extack); 3061 if (err) 3062 goto errout; 3063 status |= DO_SETLINK_NOTIFY; 3064 } 3065 3066 if (tb[IFLA_XDP]) { 3067 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3068 u32 xdp_flags = 0; 3069 3070 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3071 tb[IFLA_XDP], 3072 ifla_xdp_policy, NULL); 3073 if (err < 0) 3074 goto errout; 3075 3076 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3077 err = -EINVAL; 3078 goto errout; 3079 } 3080 3081 if (xdp[IFLA_XDP_FLAGS]) { 3082 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3083 if (xdp_flags & ~XDP_FLAGS_MASK) { 3084 err = -EINVAL; 3085 goto errout; 3086 } 3087 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3088 err = -EINVAL; 3089 goto errout; 3090 } 3091 } 3092 3093 if (xdp[IFLA_XDP_FD]) { 3094 int expected_fd = -1; 3095 3096 if (xdp_flags & XDP_FLAGS_REPLACE) { 3097 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3098 err = -EINVAL; 3099 goto errout; 3100 } 3101 expected_fd = 3102 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3103 } 3104 3105 err = dev_change_xdp_fd(dev, extack, 3106 nla_get_s32(xdp[IFLA_XDP_FD]), 3107 expected_fd, 3108 xdp_flags); 3109 if (err) 3110 goto errout; 3111 status |= DO_SETLINK_NOTIFY; 3112 } 3113 } 3114 3115 errout: 3116 if (status & DO_SETLINK_MODIFIED) { 3117 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3118 netdev_state_change(dev); 3119 3120 if (err < 0) 3121 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3122 dev->name); 3123 } 3124 3125 return err; 3126 } 3127 3128 static struct net_device *rtnl_dev_get(struct net *net, 3129 struct nlattr *tb[]) 3130 { 3131 char ifname[ALTIFNAMSIZ]; 3132 3133 if (tb[IFLA_IFNAME]) 3134 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3135 else if (tb[IFLA_ALT_IFNAME]) 3136 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3137 else 3138 return NULL; 3139 3140 return __dev_get_by_name(net, ifname); 3141 } 3142 3143 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3144 struct netlink_ext_ack *extack) 3145 { 3146 struct net *net = sock_net(skb->sk); 3147 struct ifinfomsg *ifm; 3148 struct net_device *dev; 3149 int err; 3150 struct nlattr *tb[IFLA_MAX+1]; 3151 3152 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3153 ifla_policy, extack); 3154 if (err < 0) 3155 goto errout; 3156 3157 err = rtnl_ensure_unique_netns(tb, extack, false); 3158 if (err < 0) 3159 goto errout; 3160 3161 err = -EINVAL; 3162 ifm = nlmsg_data(nlh); 3163 if (ifm->ifi_index > 0) 3164 dev = __dev_get_by_index(net, ifm->ifi_index); 3165 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3166 dev = rtnl_dev_get(net, tb); 3167 else 3168 goto errout; 3169 3170 if (dev == NULL) { 3171 err = -ENODEV; 3172 goto errout; 3173 } 3174 3175 err = validate_linkmsg(dev, tb, extack); 3176 if (err < 0) 3177 goto errout; 3178 3179 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3180 errout: 3181 return err; 3182 } 3183 3184 static int rtnl_group_dellink(const struct net *net, int group) 3185 { 3186 struct net_device *dev, *aux; 3187 LIST_HEAD(list_kill); 3188 bool found = false; 3189 3190 if (!group) 3191 return -EPERM; 3192 3193 for_each_netdev(net, dev) { 3194 if (dev->group == group) { 3195 const struct rtnl_link_ops *ops; 3196 3197 found = true; 3198 ops = dev->rtnl_link_ops; 3199 if (!ops || !ops->dellink) 3200 return -EOPNOTSUPP; 3201 } 3202 } 3203 3204 if (!found) 3205 return -ENODEV; 3206 3207 for_each_netdev_safe(net, dev, aux) { 3208 if (dev->group == group) { 3209 const struct rtnl_link_ops *ops; 3210 3211 ops = dev->rtnl_link_ops; 3212 ops->dellink(dev, &list_kill); 3213 } 3214 } 3215 unregister_netdevice_many(&list_kill); 3216 3217 return 0; 3218 } 3219 3220 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3221 { 3222 const struct rtnl_link_ops *ops; 3223 LIST_HEAD(list_kill); 3224 3225 ops = dev->rtnl_link_ops; 3226 if (!ops || !ops->dellink) 3227 return -EOPNOTSUPP; 3228 3229 ops->dellink(dev, &list_kill); 3230 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3231 3232 return 0; 3233 } 3234 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3235 3236 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3237 struct netlink_ext_ack *extack) 3238 { 3239 struct net *net = sock_net(skb->sk); 3240 u32 portid = NETLINK_CB(skb).portid; 3241 struct net *tgt_net = net; 3242 struct net_device *dev = NULL; 3243 struct ifinfomsg *ifm; 3244 struct nlattr *tb[IFLA_MAX+1]; 3245 int err; 3246 int netnsid = -1; 3247 3248 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3249 ifla_policy, extack); 3250 if (err < 0) 3251 return err; 3252 3253 err = rtnl_ensure_unique_netns(tb, extack, true); 3254 if (err < 0) 3255 return err; 3256 3257 if (tb[IFLA_TARGET_NETNSID]) { 3258 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3259 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3260 if (IS_ERR(tgt_net)) 3261 return 
PTR_ERR(tgt_net); 3262 } 3263 3264 err = -EINVAL; 3265 ifm = nlmsg_data(nlh); 3266 if (ifm->ifi_index > 0) 3267 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3268 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3269 dev = rtnl_dev_get(net, tb); 3270 else if (tb[IFLA_GROUP]) 3271 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3272 else 3273 goto out; 3274 3275 if (!dev) { 3276 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3277 err = -ENODEV; 3278 3279 goto out; 3280 } 3281 3282 err = rtnl_delete_link(dev, portid, nlh); 3283 3284 out: 3285 if (netnsid >= 0) 3286 put_net(tgt_net); 3287 3288 return err; 3289 } 3290 3291 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3292 u32 portid, const struct nlmsghdr *nlh) 3293 { 3294 unsigned int old_flags; 3295 int err; 3296 3297 old_flags = dev->flags; 3298 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3299 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3300 NULL); 3301 if (err < 0) 3302 return err; 3303 } 3304 3305 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3306 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3307 } else { 3308 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3309 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3310 } 3311 return 0; 3312 } 3313 EXPORT_SYMBOL(rtnl_configure_link); 3314 3315 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3316 unsigned char name_assign_type, 3317 const struct rtnl_link_ops *ops, 3318 struct nlattr *tb[], 3319 struct netlink_ext_ack *extack) 3320 { 3321 struct net_device *dev; 3322 unsigned int num_tx_queues = 1; 3323 unsigned int num_rx_queues = 1; 3324 int err; 3325 3326 if (tb[IFLA_NUM_TX_QUEUES]) 3327 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3328 else if (ops->get_num_tx_queues) 3329 num_tx_queues = ops->get_num_tx_queues(); 3330 3331 if (tb[IFLA_NUM_RX_QUEUES]) 3332 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3333 else if (ops->get_num_rx_queues) 3334 num_rx_queues = ops->get_num_rx_queues(); 3335 3336 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3337 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3338 return ERR_PTR(-EINVAL); 3339 } 3340 3341 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3342 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3343 return ERR_PTR(-EINVAL); 3344 } 3345 3346 if (ops->alloc) { 3347 dev = ops->alloc(tb, ifname, name_assign_type, 3348 num_tx_queues, num_rx_queues); 3349 if (IS_ERR(dev)) 3350 return dev; 3351 } else { 3352 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3353 name_assign_type, ops->setup, 3354 num_tx_queues, num_rx_queues); 3355 } 3356 3357 if (!dev) 3358 return ERR_PTR(-ENOMEM); 3359 3360 err = validate_linkmsg(dev, tb, extack); 3361 if (err < 0) { 3362 free_netdev(dev); 3363 return ERR_PTR(err); 3364 } 3365 3366 dev_net_set(dev, net); 3367 dev->rtnl_link_ops = ops; 3368 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3369 3370 if (tb[IFLA_MTU]) { 3371 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3372 3373 err = dev_validate_mtu(dev, mtu, extack); 3374 if (err) { 3375 free_netdev(dev); 3376 return ERR_PTR(err); 3377 } 3378 dev->mtu = mtu; 3379 } 3380 if (tb[IFLA_ADDRESS]) { 3381 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3382 nla_len(tb[IFLA_ADDRESS])); 3383 dev->addr_assign_type = NET_ADDR_SET; 3384 } 3385 if (tb[IFLA_BROADCAST]) 3386 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3387 nla_len(tb[IFLA_BROADCAST])); 3388 if (tb[IFLA_TXQLEN]) 3389 
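		/* dev is not yet registered at this point, so these
		 * attributes are applied directly, without the change
		 * notifications do_setlink() has to generate */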
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3390 if (tb[IFLA_OPERSTATE]) 3391 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3392 if (tb[IFLA_LINKMODE]) 3393 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3394 if (tb[IFLA_GROUP]) 3395 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3396 if (tb[IFLA_GSO_MAX_SIZE]) 3397 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3398 if (tb[IFLA_GSO_MAX_SEGS]) 3399 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3400 if (tb[IFLA_GRO_MAX_SIZE]) 3401 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3402 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3403 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3404 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3405 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3406 3407 return dev; 3408 } 3409 EXPORT_SYMBOL(rtnl_create_link); 3410 3411 static int rtnl_group_changelink(const struct sk_buff *skb, 3412 struct net *net, int group, 3413 struct ifinfomsg *ifm, 3414 struct netlink_ext_ack *extack, 3415 struct nlattr **tb) 3416 { 3417 struct net_device *dev, *aux; 3418 int err; 3419 3420 for_each_netdev_safe(net, dev, aux) { 3421 if (dev->group == group) { 3422 err = validate_linkmsg(dev, tb, extack); 3423 if (err < 0) 3424 return err; 3425 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3426 if (err < 0) 3427 return err; 3428 } 3429 } 3430 3431 return 0; 3432 } 3433 3434 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3435 const struct rtnl_link_ops *ops, 3436 const struct nlmsghdr *nlh, 3437 struct nlattr **tb, struct nlattr **data, 3438 struct netlink_ext_ack *extack) 3439 { 3440 unsigned char name_assign_type = NET_NAME_USER; 3441 struct net *net = sock_net(skb->sk); 3442 u32 portid = NETLINK_CB(skb).portid; 3443 struct net *dest_net, *link_net; 3444 struct net_device *dev; 3445 char ifname[IFNAMSIZ]; 3446 int err; 3447 3448 if (!ops->alloc && !ops->setup) 3449 return -EOPNOTSUPP; 3450 3451 if (tb[IFLA_IFNAME]) { 3452 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3453 } else { 3454 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3455 name_assign_type = NET_NAME_ENUM; 3456 } 3457 3458 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3459 if (IS_ERR(dest_net)) 3460 return PTR_ERR(dest_net); 3461 3462 if (tb[IFLA_LINK_NETNSID]) { 3463 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3464 3465 link_net = get_net_ns_by_id(dest_net, id); 3466 if (!link_net) { 3467 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3468 err = -EINVAL; 3469 goto out; 3470 } 3471 err = -EPERM; 3472 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3473 goto out; 3474 } else { 3475 link_net = NULL; 3476 } 3477 3478 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3479 name_assign_type, ops, tb, extack); 3480 if (IS_ERR(dev)) { 3481 err = PTR_ERR(dev); 3482 goto out; 3483 } 3484 3485 dev->ifindex = ifm->ifi_index; 3486 3487 if (ops->newlink) 3488 err = ops->newlink(link_net ? 
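					/* GNU "a ? : b": the new device is
					 * created in link_net when
					 * IFLA_LINK_NETNSID was given (it is
					 * moved to dest_net just below),
					 * otherwise in the caller's netns */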
: net, dev, tb, data, extack); 3489 else 3490 err = register_netdevice(dev); 3491 if (err < 0) { 3492 free_netdev(dev); 3493 goto out; 3494 } 3495 3496 err = rtnl_configure_link(dev, ifm, portid, nlh); 3497 if (err < 0) 3498 goto out_unregister; 3499 if (link_net) { 3500 err = dev_change_net_namespace(dev, dest_net, ifname); 3501 if (err < 0) 3502 goto out_unregister; 3503 } 3504 if (tb[IFLA_MASTER]) { 3505 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3506 if (err) 3507 goto out_unregister; 3508 } 3509 out: 3510 if (link_net) 3511 put_net(link_net); 3512 put_net(dest_net); 3513 return err; 3514 out_unregister: 3515 if (ops->newlink) { 3516 LIST_HEAD(list_kill); 3517 3518 ops->dellink(dev, &list_kill); 3519 unregister_netdevice_many(&list_kill); 3520 } else { 3521 unregister_netdevice(dev); 3522 } 3523 goto out; 3524 } 3525 3526 struct rtnl_newlink_tbs { 3527 struct nlattr *tb[IFLA_MAX + 1]; 3528 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3529 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3530 }; 3531 3532 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3533 struct rtnl_newlink_tbs *tbs, 3534 struct netlink_ext_ack *extack) 3535 { 3536 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3537 struct nlattr ** const tb = tbs->tb; 3538 const struct rtnl_link_ops *m_ops; 3539 struct net_device *master_dev; 3540 struct net *net = sock_net(skb->sk); 3541 const struct rtnl_link_ops *ops; 3542 struct nlattr **slave_data; 3543 char kind[MODULE_NAME_LEN]; 3544 struct net_device *dev; 3545 struct ifinfomsg *ifm; 3546 struct nlattr **data; 3547 bool link_specified; 3548 int err; 3549 3550 #ifdef CONFIG_MODULES 3551 replay: 3552 #endif 3553 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3554 ifla_policy, extack); 3555 if (err < 0) 3556 return err; 3557 3558 err = rtnl_ensure_unique_netns(tb, extack, false); 3559 if (err < 0) 3560 return err; 3561 3562 ifm = nlmsg_data(nlh); 3563 if (ifm->ifi_index > 0) { 3564 link_specified = true; 3565 dev = __dev_get_by_index(net, ifm->ifi_index); 3566 } else if (ifm->ifi_index < 0) { 3567 NL_SET_ERR_MSG(extack, "ifindex can't be negative"); 3568 return -EINVAL; 3569 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3570 link_specified = true; 3571 dev = rtnl_dev_get(net, tb); 3572 } else { 3573 link_specified = false; 3574 dev = NULL; 3575 } 3576 3577 master_dev = NULL; 3578 m_ops = NULL; 3579 if (dev) { 3580 master_dev = netdev_master_upper_dev_get(dev); 3581 if (master_dev) 3582 m_ops = master_dev->rtnl_link_ops; 3583 } 3584 3585 if (tb[IFLA_LINKINFO]) { 3586 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3587 tb[IFLA_LINKINFO], 3588 ifla_info_policy, NULL); 3589 if (err < 0) 3590 return err; 3591 } else 3592 memset(linkinfo, 0, sizeof(linkinfo)); 3593 3594 if (linkinfo[IFLA_INFO_KIND]) { 3595 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3596 ops = rtnl_link_ops_get(kind); 3597 } else { 3598 kind[0] = '\0'; 3599 ops = NULL; 3600 } 3601 3602 data = NULL; 3603 if (ops) { 3604 if (ops->maxtype > RTNL_MAX_TYPE) 3605 return -EINVAL; 3606 3607 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3608 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3609 linkinfo[IFLA_INFO_DATA], 3610 ops->policy, extack); 3611 if (err < 0) 3612 return err; 3613 data = tbs->attr; 3614 } 3615 if (ops->validate) { 3616 err = ops->validate(tb, data, extack); 3617 if (err < 0) 3618 return err; 3619 } 3620 } 3621 3622 slave_data = NULL; 3623 if (m_ops) { 3624 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3625 
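		/* the parse below fills tbs->slave_attr, which only has
		 * room for RTNL_SLAVE_MAX_TYPE + 1 entries */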
return -EINVAL; 3626 3627 if (m_ops->slave_maxtype && 3628 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3629 err = nla_parse_nested_deprecated(tbs->slave_attr, 3630 m_ops->slave_maxtype, 3631 linkinfo[IFLA_INFO_SLAVE_DATA], 3632 m_ops->slave_policy, 3633 extack); 3634 if (err < 0) 3635 return err; 3636 slave_data = tbs->slave_attr; 3637 } 3638 } 3639 3640 if (dev) { 3641 int status = 0; 3642 3643 if (nlh->nlmsg_flags & NLM_F_EXCL) 3644 return -EEXIST; 3645 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3646 return -EOPNOTSUPP; 3647 3648 err = validate_linkmsg(dev, tb, extack); 3649 if (err < 0) 3650 return err; 3651 3652 if (linkinfo[IFLA_INFO_DATA]) { 3653 if (!ops || ops != dev->rtnl_link_ops || 3654 !ops->changelink) 3655 return -EOPNOTSUPP; 3656 3657 err = ops->changelink(dev, tb, data, extack); 3658 if (err < 0) 3659 return err; 3660 status |= DO_SETLINK_NOTIFY; 3661 } 3662 3663 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3664 if (!m_ops || !m_ops->slave_changelink) 3665 return -EOPNOTSUPP; 3666 3667 err = m_ops->slave_changelink(master_dev, dev, tb, 3668 slave_data, extack); 3669 if (err < 0) 3670 return err; 3671 status |= DO_SETLINK_NOTIFY; 3672 } 3673 3674 return do_setlink(skb, dev, ifm, extack, tb, status); 3675 } 3676 3677 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3678 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3679 * or it's for a group 3680 */ 3681 if (link_specified) 3682 return -ENODEV; 3683 if (tb[IFLA_GROUP]) 3684 return rtnl_group_changelink(skb, net, 3685 nla_get_u32(tb[IFLA_GROUP]), 3686 ifm, extack, tb); 3687 return -ENODEV; 3688 } 3689 3690 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3691 return -EOPNOTSUPP; 3692 3693 if (!ops) { 3694 #ifdef CONFIG_MODULES 3695 if (kind[0]) { 3696 __rtnl_unlock(); 3697 request_module("rtnl-link-%s", kind); 3698 rtnl_lock(); 3699 ops = rtnl_link_ops_get(kind); 3700 if (ops) 3701 goto replay; 3702 } 3703 #endif 3704 NL_SET_ERR_MSG(extack, "Unknown device type"); 3705 return -EOPNOTSUPP; 3706 } 3707 3708 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3709 } 3710 3711 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3712 struct netlink_ext_ack *extack) 3713 { 3714 struct rtnl_newlink_tbs *tbs; 3715 int ret; 3716 3717 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3718 if (!tbs) 3719 return -ENOMEM; 3720 3721 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3722 kfree(tbs); 3723 return ret; 3724 } 3725 3726 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3727 const struct nlmsghdr *nlh, 3728 struct nlattr **tb, 3729 struct netlink_ext_ack *extack) 3730 { 3731 struct ifinfomsg *ifm; 3732 int i, err; 3733 3734 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3735 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3736 return -EINVAL; 3737 } 3738 3739 if (!netlink_strict_get_check(skb)) 3740 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3741 ifla_policy, extack); 3742 3743 ifm = nlmsg_data(nlh); 3744 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3745 ifm->ifi_change) { 3746 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3747 return -EINVAL; 3748 } 3749 3750 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3751 ifla_policy, extack); 3752 if (err) 3753 return err; 3754 3755 for (i = 0; i <= IFLA_MAX; i++) { 3756 if (!tb[i]) 3757 continue; 3758 3759 switch (i) { 3760 case IFLA_IFNAME: 3761 case IFLA_ALT_IFNAME: 3762 case IFLA_EXT_MASK: 3763 case IFLA_TARGET_NETNSID: 3764 break; 3765 default: 3766 NL_SET_ERR_MSG(extack, "Unsupported 
attribute in get link request"); 3767 return -EINVAL; 3768 } 3769 } 3770 3771 return 0; 3772 } 3773 3774 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3775 struct netlink_ext_ack *extack) 3776 { 3777 struct net *net = sock_net(skb->sk); 3778 struct net *tgt_net = net; 3779 struct ifinfomsg *ifm; 3780 struct nlattr *tb[IFLA_MAX+1]; 3781 struct net_device *dev = NULL; 3782 struct sk_buff *nskb; 3783 int netnsid = -1; 3784 int err; 3785 u32 ext_filter_mask = 0; 3786 3787 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3788 if (err < 0) 3789 return err; 3790 3791 err = rtnl_ensure_unique_netns(tb, extack, true); 3792 if (err < 0) 3793 return err; 3794 3795 if (tb[IFLA_TARGET_NETNSID]) { 3796 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3797 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3798 if (IS_ERR(tgt_net)) 3799 return PTR_ERR(tgt_net); 3800 } 3801 3802 if (tb[IFLA_EXT_MASK]) 3803 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3804 3805 err = -EINVAL; 3806 ifm = nlmsg_data(nlh); 3807 if (ifm->ifi_index > 0) 3808 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3809 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3810 dev = rtnl_dev_get(tgt_net, tb); 3811 else 3812 goto out; 3813 3814 err = -ENODEV; 3815 if (dev == NULL) 3816 goto out; 3817 3818 err = -ENOBUFS; 3819 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask)); 3820 if (nskb == NULL) 3821 goto out; 3822 3823 /* Synchronize the carrier state so we don't report a state 3824 * that we're not actually going to honour immediately; if 3825 * the driver just did a carrier off->on transition, we can 3826 * only TX if link watch work has run, but without this we'd 3827 * already report carrier on, even if it doesn't work yet. 3828 */ 3829 linkwatch_sync_dev(dev); 3830 3831 err = rtnl_fill_ifinfo(nskb, dev, net, 3832 RTM_NEWLINK, NETLINK_CB(skb).portid, 3833 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3834 0, NULL, 0, netnsid, GFP_KERNEL); 3835 if (err < 0) { 3836 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3837 WARN_ON(err == -EMSGSIZE); 3838 kfree_skb(nskb); 3839 } else 3840 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3841 out: 3842 if (netnsid >= 0) 3843 put_net(tgt_net); 3844 3845 return err; 3846 } 3847 3848 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3849 bool *changed, struct netlink_ext_ack *extack) 3850 { 3851 char *alt_ifname; 3852 size_t size; 3853 int err; 3854 3855 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3856 if (err) 3857 return err; 3858 3859 if (cmd == RTM_NEWLINKPROP) { 3860 size = rtnl_prop_list_size(dev); 3861 size += nla_total_size(ALTIFNAMSIZ); 3862 if (size >= U16_MAX) { 3863 NL_SET_ERR_MSG(extack, 3864 "effective property list too long"); 3865 return -EINVAL; 3866 } 3867 } 3868 3869 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3870 if (!alt_ifname) 3871 return -ENOMEM; 3872 3873 if (cmd == RTM_NEWLINKPROP) { 3874 err = netdev_name_node_alt_create(dev, alt_ifname); 3875 if (!err) 3876 alt_ifname = NULL; 3877 } else if (cmd == RTM_DELLINKPROP) { 3878 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3879 } else { 3880 WARN_ON_ONCE(1); 3881 err = -EINVAL; 3882 } 3883 3884 kfree(alt_ifname); 3885 if (!err) 3886 *changed = true; 3887 return err; 3888 } 3889 3890 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3891 struct netlink_ext_ack *extack) 3892 { 3893 struct net *net = sock_net(skb->sk); 3894 struct nlattr *tb[IFLA_MAX + 1]; 3895 struct net_device 
*dev; 3896 struct ifinfomsg *ifm; 3897 bool changed = false; 3898 struct nlattr *attr; 3899 int err, rem; 3900 3901 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3902 if (err) 3903 return err; 3904 3905 err = rtnl_ensure_unique_netns(tb, extack, true); 3906 if (err) 3907 return err; 3908 3909 ifm = nlmsg_data(nlh); 3910 if (ifm->ifi_index > 0) 3911 dev = __dev_get_by_index(net, ifm->ifi_index); 3912 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3913 dev = rtnl_dev_get(net, tb); 3914 else 3915 return -EINVAL; 3916 3917 if (!dev) 3918 return -ENODEV; 3919 3920 if (!tb[IFLA_PROP_LIST]) 3921 return 0; 3922 3923 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3924 switch (nla_type(attr)) { 3925 case IFLA_ALT_IFNAME: 3926 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3927 if (err) 3928 return err; 3929 break; 3930 } 3931 } 3932 3933 if (changed) 3934 netdev_state_change(dev); 3935 return 0; 3936 } 3937 3938 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3939 struct netlink_ext_ack *extack) 3940 { 3941 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3942 } 3943 3944 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3945 struct netlink_ext_ack *extack) 3946 { 3947 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3948 } 3949 3950 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3951 { 3952 struct net *net = sock_net(skb->sk); 3953 size_t min_ifinfo_dump_size = 0; 3954 struct nlattr *tb[IFLA_MAX+1]; 3955 u32 ext_filter_mask = 0; 3956 struct net_device *dev; 3957 int hdrlen; 3958 3959 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3960 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3961 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3962 3963 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3964 if (tb[IFLA_EXT_MASK]) 3965 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3966 } 3967 3968 if (!ext_filter_mask) 3969 return NLMSG_GOODSIZE; 3970 /* 3971 * traverse the list of net devices and compute the minimum 3972 * buffer size based upon the filter mask. 3973 */ 3974 rcu_read_lock(); 3975 for_each_netdev_rcu(net, dev) { 3976 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3977 if_nlmsg_size(dev, ext_filter_mask)); 3978 } 3979 rcu_read_unlock(); 3980 3981 return nlmsg_total_size(min_ifinfo_dump_size); 3982 } 3983 3984 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3985 { 3986 int idx; 3987 int s_idx = cb->family; 3988 int type = cb->nlh->nlmsg_type - RTM_BASE; 3989 int ret = 0; 3990 3991 if (s_idx == 0) 3992 s_idx = 1; 3993 3994 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3995 struct rtnl_link __rcu **tab; 3996 struct rtnl_link *link; 3997 rtnl_dumpit_func dumpit; 3998 3999 if (idx < s_idx || idx == PF_PACKET) 4000 continue; 4001 4002 if (type < 0 || type >= RTM_NR_MSGTYPES) 4003 continue; 4004 4005 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 4006 if (!tab) 4007 continue; 4008 4009 link = rcu_dereference_rtnl(tab[type]); 4010 if (!link) 4011 continue; 4012 4013 dumpit = link->dumpit; 4014 if (!dumpit) 4015 continue; 4016 4017 if (idx > s_idx) { 4018 memset(&cb->args[0], 0, sizeof(cb->args)); 4019 cb->prev_seq = 0; 4020 cb->seq = 0; 4021 } 4022 ret = dumpit(skb, cb); 4023 if (ret) 4024 break; 4025 } 4026 cb->family = idx; 4027 4028 return skb->len ? 
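			 /* GNU "a ? : b": report the bytes dumped so far if
			  * there are any, otherwise the last dumpit result */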
: ret; 4029 } 4030 4031 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4032 unsigned int change, 4033 u32 event, gfp_t flags, int *new_nsid, 4034 int new_ifindex, u32 portid, 4035 const struct nlmsghdr *nlh) 4036 { 4037 struct net *net = dev_net(dev); 4038 struct sk_buff *skb; 4039 int err = -ENOBUFS; 4040 u32 seq = 0; 4041 4042 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4043 if (skb == NULL) 4044 goto errout; 4045 4046 if (nlmsg_report(nlh)) 4047 seq = nlmsg_seq(nlh); 4048 else 4049 portid = 0; 4050 4051 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4052 type, portid, seq, change, 0, 0, event, 4053 new_nsid, new_ifindex, -1, flags); 4054 if (err < 0) { 4055 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4056 WARN_ON(err == -EMSGSIZE); 4057 kfree_skb(skb); 4058 goto errout; 4059 } 4060 return skb; 4061 errout: 4062 if (err < 0) 4063 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4064 return NULL; 4065 } 4066 4067 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4068 u32 portid, const struct nlmsghdr *nlh) 4069 { 4070 struct net *net = dev_net(dev); 4071 4072 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4073 } 4074 4075 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4076 unsigned int change, u32 event, 4077 gfp_t flags, int *new_nsid, int new_ifindex, 4078 u32 portid, const struct nlmsghdr *nlh) 4079 { 4080 struct sk_buff *skb; 4081 4082 if (dev->reg_state != NETREG_REGISTERED) 4083 return; 4084 4085 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4086 new_ifindex, portid, nlh); 4087 if (skb) 4088 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4089 } 4090 4091 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4092 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4093 { 4094 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4095 NULL, 0, portid, nlh); 4096 } 4097 4098 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4099 gfp_t flags, int *new_nsid, int new_ifindex) 4100 { 4101 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4102 new_nsid, new_ifindex, 0, NULL); 4103 } 4104 4105 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4106 struct net_device *dev, 4107 u8 *addr, u16 vid, u32 pid, u32 seq, 4108 int type, unsigned int flags, 4109 int nlflags, u16 ndm_state) 4110 { 4111 struct nlmsghdr *nlh; 4112 struct ndmsg *ndm; 4113 4114 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4115 if (!nlh) 4116 return -EMSGSIZE; 4117 4118 ndm = nlmsg_data(nlh); 4119 ndm->ndm_family = AF_BRIDGE; 4120 ndm->ndm_pad1 = 0; 4121 ndm->ndm_pad2 = 0; 4122 ndm->ndm_flags = flags; 4123 ndm->ndm_type = 0; 4124 ndm->ndm_ifindex = dev->ifindex; 4125 ndm->ndm_state = ndm_state; 4126 4127 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) 4128 goto nla_put_failure; 4129 if (vid) 4130 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4131 goto nla_put_failure; 4132 4133 nlmsg_end(skb, nlh); 4134 return 0; 4135 4136 nla_put_failure: 4137 nlmsg_cancel(skb, nlh); 4138 return -EMSGSIZE; 4139 } 4140 4141 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) 4142 { 4143 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4144 nla_total_size(dev->addr_len) + /* NDA_LLADDR */ 4145 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4146 0; 4147 } 4148 4149 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4150 u16 ndm_state) 4151 { 4152 struct net *net = dev_net(dev); 4153 struct 
sk_buff *skb; 4154 int err = -ENOBUFS; 4155 4156 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); 4157 if (!skb) 4158 goto errout; 4159 4160 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4161 0, 0, type, NTF_SELF, 0, ndm_state); 4162 if (err < 0) { 4163 kfree_skb(skb); 4164 goto errout; 4165 } 4166 4167 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4168 return; 4169 errout: 4170 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4171 } 4172 4173 /* 4174 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4175 */ 4176 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4177 struct nlattr *tb[], 4178 struct net_device *dev, 4179 const unsigned char *addr, u16 vid, 4180 u16 flags) 4181 { 4182 int err = -EINVAL; 4183 4184 /* If aging addresses are supported device will need to 4185 * implement its own handler for this. 4186 */ 4187 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4188 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4189 return err; 4190 } 4191 4192 if (tb[NDA_FLAGS_EXT]) { 4193 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4194 return err; 4195 } 4196 4197 if (vid) { 4198 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4199 return err; 4200 } 4201 4202 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4203 err = dev_uc_add_excl(dev, addr); 4204 else if (is_multicast_ether_addr(addr)) 4205 err = dev_mc_add_excl(dev, addr); 4206 4207 /* Only return duplicate errors if NLM_F_EXCL is set */ 4208 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4209 err = 0; 4210 4211 return err; 4212 } 4213 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4214 4215 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4216 struct netlink_ext_ack *extack) 4217 { 4218 u16 vid = 0; 4219 4220 if (vlan_attr) { 4221 if (nla_len(vlan_attr) != sizeof(u16)) { 4222 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4223 return -EINVAL; 4224 } 4225 4226 vid = nla_get_u16(vlan_attr); 4227 4228 if (!vid || vid >= VLAN_VID_MASK) { 4229 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4230 return -EINVAL; 4231 } 4232 } 4233 *p_vid = vid; 4234 return 0; 4235 } 4236 4237 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4238 struct netlink_ext_ack *extack) 4239 { 4240 struct net *net = sock_net(skb->sk); 4241 struct ndmsg *ndm; 4242 struct nlattr *tb[NDA_MAX+1]; 4243 struct net_device *dev; 4244 u8 *addr; 4245 u16 vid; 4246 int err; 4247 4248 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4249 extack); 4250 if (err < 0) 4251 return err; 4252 4253 ndm = nlmsg_data(nlh); 4254 if (ndm->ndm_ifindex == 0) { 4255 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4256 return -EINVAL; 4257 } 4258 4259 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4260 if (dev == NULL) { 4261 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4262 return -ENODEV; 4263 } 4264 4265 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4266 NL_SET_ERR_MSG(extack, "invalid address"); 4267 return -EINVAL; 4268 } 4269 4270 if (dev->type != ARPHRD_ETHER) { 4271 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4272 return -EINVAL; 4273 } 4274 4275 addr = nla_data(tb[NDA_LLADDR]); 4276 4277 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4278 if (err) 4279 return err; 4280 4281 err = -EOPNOTSUPP; 4282 4283 /* Support fdb on master device the net/bridge default case */ 4284 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4285 netif_is_bridge_port(dev)) { 4286 struct 
net_device *br_dev = netdev_master_upper_dev_get(dev); 4287 const struct net_device_ops *ops = br_dev->netdev_ops; 4288 4289 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4290 nlh->nlmsg_flags, extack); 4291 if (err) 4292 goto out; 4293 else 4294 ndm->ndm_flags &= ~NTF_MASTER; 4295 } 4296 4297 /* Embedded bridge, macvlan, and any other device support */ 4298 if ((ndm->ndm_flags & NTF_SELF)) { 4299 if (dev->netdev_ops->ndo_fdb_add) 4300 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4301 vid, 4302 nlh->nlmsg_flags, 4303 extack); 4304 else 4305 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4306 nlh->nlmsg_flags); 4307 4308 if (!err) { 4309 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4310 ndm->ndm_state); 4311 ndm->ndm_flags &= ~NTF_SELF; 4312 } 4313 } 4314 out: 4315 return err; 4316 } 4317 4318 /* 4319 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4320 */ 4321 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4322 struct nlattr *tb[], 4323 struct net_device *dev, 4324 const unsigned char *addr, u16 vid) 4325 { 4326 int err = -EINVAL; 4327 4328 /* If aging addresses are supported device will need to 4329 * implement its own handler for this. 4330 */ 4331 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4332 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4333 return err; 4334 } 4335 4336 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4337 err = dev_uc_del(dev, addr); 4338 else if (is_multicast_ether_addr(addr)) 4339 err = dev_mc_del(dev, addr); 4340 4341 return err; 4342 } 4343 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4344 4345 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4346 struct netlink_ext_ack *extack) 4347 { 4348 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4349 struct net *net = sock_net(skb->sk); 4350 const struct net_device_ops *ops; 4351 struct ndmsg *ndm; 4352 struct nlattr *tb[NDA_MAX+1]; 4353 struct net_device *dev; 4354 __u8 *addr = NULL; 4355 int err; 4356 u16 vid; 4357 4358 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4359 return -EPERM; 4360 4361 if (!del_bulk) { 4362 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4363 NULL, extack); 4364 } else { 4365 /* For bulk delete, the drivers will parse the message with 4366 * policy. 
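		 *
		 * A minimal sketch of such a driver callback (illustrative
		 * only; "foo", foo_fdb_policy and foo_fdb_flush are not from
		 * a real driver). It receives the raw nlh, exactly as passed
		 * to ndo_fdb_del_bulk() below, and applies its own policy:
		 *
		 *	static int foo_fdb_del_bulk(struct nlmsghdr *nlh,
		 *				    struct net_device *dev,
		 *				    struct netlink_ext_ack *extack)
		 *	{
		 *		struct nlattr *tb[NDA_MAX + 1];
		 *		int err;
		 *
		 *		err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb,
		 *				  NDA_MAX, foo_fdb_policy, extack);
		 *		if (err)
		 *			return err;
		 *		// NDA_* attributes, when present, narrow which
		 *		// entries get flushed
		 *		return foo_fdb_flush(dev, tb, extack);
		 *	}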
4367 */ 4368 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); 4369 } 4370 if (err < 0) 4371 return err; 4372 4373 ndm = nlmsg_data(nlh); 4374 if (ndm->ndm_ifindex == 0) { 4375 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4376 return -EINVAL; 4377 } 4378 4379 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4380 if (dev == NULL) { 4381 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4382 return -ENODEV; 4383 } 4384 4385 if (!del_bulk) { 4386 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4387 NL_SET_ERR_MSG(extack, "invalid address"); 4388 return -EINVAL; 4389 } 4390 addr = nla_data(tb[NDA_LLADDR]); 4391 4392 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4393 if (err) 4394 return err; 4395 } 4396 4397 if (dev->type != ARPHRD_ETHER) { 4398 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4399 return -EINVAL; 4400 } 4401 4402 err = -EOPNOTSUPP; 4403 4404 /* Support fdb on master device the net/bridge default case */ 4405 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4406 netif_is_bridge_port(dev)) { 4407 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4408 4409 ops = br_dev->netdev_ops; 4410 if (!del_bulk) { 4411 if (ops->ndo_fdb_del) 4412 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4413 } else { 4414 if (ops->ndo_fdb_del_bulk) 4415 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4416 } 4417 4418 if (err) 4419 goto out; 4420 else 4421 ndm->ndm_flags &= ~NTF_MASTER; 4422 } 4423 4424 /* Embedded bridge, macvlan, and any other device support */ 4425 if (ndm->ndm_flags & NTF_SELF) { 4426 ops = dev->netdev_ops; 4427 if (!del_bulk) { 4428 if (ops->ndo_fdb_del) 4429 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4430 else 4431 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4432 } else { 4433 /* in case err was cleared by NTF_MASTER call */ 4434 err = -EOPNOTSUPP; 4435 if (ops->ndo_fdb_del_bulk) 4436 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4437 } 4438 4439 if (!err) { 4440 if (!del_bulk) 4441 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4442 ndm->ndm_state); 4443 ndm->ndm_flags &= ~NTF_SELF; 4444 } 4445 } 4446 out: 4447 return err; 4448 } 4449 4450 static int nlmsg_populate_fdb(struct sk_buff *skb, 4451 struct netlink_callback *cb, 4452 struct net_device *dev, 4453 int *idx, 4454 struct netdev_hw_addr_list *list) 4455 { 4456 struct netdev_hw_addr *ha; 4457 int err; 4458 u32 portid, seq; 4459 4460 portid = NETLINK_CB(cb->skb).portid; 4461 seq = cb->nlh->nlmsg_seq; 4462 4463 list_for_each_entry(ha, &list->list, list) { 4464 if (*idx < cb->args[2]) 4465 goto skip; 4466 4467 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4468 portid, seq, 4469 RTM_NEWNEIGH, NTF_SELF, 4470 NLM_F_MULTI, NUD_PERMANENT); 4471 if (err < 0) 4472 return err; 4473 skip: 4474 *idx += 1; 4475 } 4476 return 0; 4477 } 4478 4479 /** 4480 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4481 * @skb: socket buffer to store message in 4482 * @cb: netlink callback 4483 * @dev: netdevice 4484 * @filter_dev: ignored 4485 * @idx: the number of FDB table entries dumped is added to *@idx 4486 * 4487 * Default netdevice operation to dump the existing unicast address list. 4488 * Returns number of addresses from list put in skb. 
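 *
 * rtnl_fdb_dump() falls back to this helper for any device that does not
 * provide its own ndo_fdb_dump. A driver may also chain to it after
 * dumping device-private entries, e.g. (sketch; foo_fdb_dump is
 * illustrative, not a real driver):
 *
 *	static int foo_fdb_dump(struct sk_buff *skb,
 *				struct netlink_callback *cb,
 *				struct net_device *dev,
 *				struct net_device *filter_dev, int *idx)
 *	{
 *		// first dump entries owned by the hardware, then:
 *		return ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 *	}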
4489 */ 4490 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4491 struct netlink_callback *cb, 4492 struct net_device *dev, 4493 struct net_device *filter_dev, 4494 int *idx) 4495 { 4496 int err; 4497 4498 if (dev->type != ARPHRD_ETHER) 4499 return -EINVAL; 4500 4501 netif_addr_lock_bh(dev); 4502 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4503 if (err) 4504 goto out; 4505 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4506 out: 4507 netif_addr_unlock_bh(dev); 4508 return err; 4509 } 4510 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4511 4512 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4513 int *br_idx, int *brport_idx, 4514 struct netlink_ext_ack *extack) 4515 { 4516 struct nlattr *tb[NDA_MAX + 1]; 4517 struct ndmsg *ndm; 4518 int err, i; 4519 4520 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4521 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4522 return -EINVAL; 4523 } 4524 4525 ndm = nlmsg_data(nlh); 4526 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4527 ndm->ndm_flags || ndm->ndm_type) { 4528 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4529 return -EINVAL; 4530 } 4531 4532 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4533 NDA_MAX, NULL, extack); 4534 if (err < 0) 4535 return err; 4536 4537 *brport_idx = ndm->ndm_ifindex; 4538 for (i = 0; i <= NDA_MAX; ++i) { 4539 if (!tb[i]) 4540 continue; 4541 4542 switch (i) { 4543 case NDA_IFINDEX: 4544 if (nla_len(tb[i]) != sizeof(u32)) { 4545 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4546 return -EINVAL; 4547 } 4548 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4549 break; 4550 case NDA_MASTER: 4551 if (nla_len(tb[i]) != sizeof(u32)) { 4552 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4553 return -EINVAL; 4554 } 4555 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4556 break; 4557 default: 4558 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4559 return -EINVAL; 4560 } 4561 } 4562 4563 return 0; 4564 } 4565 4566 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4567 int *br_idx, int *brport_idx, 4568 struct netlink_ext_ack *extack) 4569 { 4570 struct nlattr *tb[IFLA_MAX+1]; 4571 int err; 4572 4573 /* A hack to preserve kernel<->userspace interface. 4574 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4575 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4576 * So, check for ndmsg with an optional u32 attribute (not used here). 4577 * Fortunately these sizes don't conflict with the size of ifinfomsg 4578 * with an optional attribute. 
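	 *
	 * Concretely: sizeof(struct ndmsg) is 12 bytes and
	 * nla_attr_size(sizeof(u32)) is 8, so a legacy ndmsg request has a
	 * 12- or 20-byte payload, whereas an ifinfomsg request has 16 (24
	 * with one u32 attribute). The two sets are disjoint, which is what
	 * the size test below depends on.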
4579 */ 4580 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4581 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4582 nla_attr_size(sizeof(u32)))) { 4583 struct ifinfomsg *ifm; 4584 4585 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4586 tb, IFLA_MAX, ifla_policy, 4587 extack); 4588 if (err < 0) { 4589 return -EINVAL; 4590 } else if (err == 0) { 4591 if (tb[IFLA_MASTER]) 4592 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4593 } 4594 4595 ifm = nlmsg_data(nlh); 4596 *brport_idx = ifm->ifi_index; 4597 } 4598 return 0; 4599 } 4600 4601 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4602 { 4603 struct net_device *dev; 4604 struct net_device *br_dev = NULL; 4605 const struct net_device_ops *ops = NULL; 4606 const struct net_device_ops *cops = NULL; 4607 struct net *net = sock_net(skb->sk); 4608 struct hlist_head *head; 4609 int brport_idx = 0; 4610 int br_idx = 0; 4611 int h, s_h; 4612 int idx = 0, s_idx; 4613 int err = 0; 4614 int fidx = 0; 4615 4616 if (cb->strict_check) 4617 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4618 cb->extack); 4619 else 4620 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4621 cb->extack); 4622 if (err < 0) 4623 return err; 4624 4625 if (br_idx) { 4626 br_dev = __dev_get_by_index(net, br_idx); 4627 if (!br_dev) 4628 return -ENODEV; 4629 4630 ops = br_dev->netdev_ops; 4631 } 4632 4633 s_h = cb->args[0]; 4634 s_idx = cb->args[1]; 4635 4636 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4637 idx = 0; 4638 head = &net->dev_index_head[h]; 4639 hlist_for_each_entry(dev, head, index_hlist) { 4640 4641 if (brport_idx && (dev->ifindex != brport_idx)) 4642 continue; 4643 4644 if (!br_idx) { /* user did not specify a specific bridge */ 4645 if (netif_is_bridge_port(dev)) { 4646 br_dev = netdev_master_upper_dev_get(dev); 4647 cops = br_dev->netdev_ops; 4648 } 4649 } else { 4650 if (dev != br_dev && 4651 !netif_is_bridge_port(dev)) 4652 continue; 4653 4654 if (br_dev != netdev_master_upper_dev_get(dev) && 4655 !netif_is_bridge_master(dev)) 4656 continue; 4657 cops = ops; 4658 } 4659 4660 if (idx < s_idx) 4661 goto cont; 4662 4663 if (netif_is_bridge_port(dev)) { 4664 if (cops && cops->ndo_fdb_dump) { 4665 err = cops->ndo_fdb_dump(skb, cb, 4666 br_dev, dev, 4667 &fidx); 4668 if (err == -EMSGSIZE) 4669 goto out; 4670 } 4671 } 4672 4673 if (dev->netdev_ops->ndo_fdb_dump) 4674 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4675 dev, NULL, 4676 &fidx); 4677 else 4678 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4679 &fidx); 4680 if (err == -EMSGSIZE) 4681 goto out; 4682 4683 cops = NULL; 4684 4685 /* reset fdb offset to 0 for rest of the interfaces */ 4686 cb->args[2] = 0; 4687 fidx = 0; 4688 cont: 4689 idx++; 4690 } 4691 } 4692 4693 out: 4694 cb->args[0] = h; 4695 cb->args[1] = idx; 4696 cb->args[2] = fidx; 4697 4698 return skb->len; 4699 } 4700 4701 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4702 struct nlattr **tb, u8 *ndm_flags, 4703 int *br_idx, int *brport_idx, u8 **addr, 4704 u16 *vid, struct netlink_ext_ack *extack) 4705 { 4706 struct ndmsg *ndm; 4707 int err, i; 4708 4709 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4710 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4711 return -EINVAL; 4712 } 4713 4714 ndm = nlmsg_data(nlh); 4715 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4716 ndm->ndm_type) { 4717 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4718 return -EINVAL; 4719 } 4720 4721 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4722 
NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4723 return -EINVAL; 4724 } 4725 4726 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4727 NDA_MAX, nda_policy, extack); 4728 if (err < 0) 4729 return err; 4730 4731 *ndm_flags = ndm->ndm_flags; 4732 *brport_idx = ndm->ndm_ifindex; 4733 for (i = 0; i <= NDA_MAX; ++i) { 4734 if (!tb[i]) 4735 continue; 4736 4737 switch (i) { 4738 case NDA_MASTER: 4739 *br_idx = nla_get_u32(tb[i]); 4740 break; 4741 case NDA_LLADDR: 4742 if (nla_len(tb[i]) != ETH_ALEN) { 4743 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4744 return -EINVAL; 4745 } 4746 *addr = nla_data(tb[i]); 4747 break; 4748 case NDA_VLAN: 4749 err = fdb_vid_parse(tb[i], vid, extack); 4750 if (err) 4751 return err; 4752 break; 4753 case NDA_VNI: 4754 break; 4755 default: 4756 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4757 return -EINVAL; 4758 } 4759 } 4760 4761 return 0; 4762 } 4763 4764 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4765 struct netlink_ext_ack *extack) 4766 { 4767 struct net_device *dev = NULL, *br_dev = NULL; 4768 const struct net_device_ops *ops = NULL; 4769 struct net *net = sock_net(in_skb->sk); 4770 struct nlattr *tb[NDA_MAX + 1]; 4771 struct sk_buff *skb; 4772 int brport_idx = 0; 4773 u8 ndm_flags = 0; 4774 int br_idx = 0; 4775 u8 *addr = NULL; 4776 u16 vid = 0; 4777 int err; 4778 4779 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4780 &brport_idx, &addr, &vid, extack); 4781 if (err < 0) 4782 return err; 4783 4784 if (!addr) { 4785 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4786 return -EINVAL; 4787 } 4788 4789 if (brport_idx) { 4790 dev = __dev_get_by_index(net, brport_idx); 4791 if (!dev) { 4792 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4793 return -ENODEV; 4794 } 4795 } 4796 4797 if (br_idx) { 4798 if (dev) { 4799 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4800 return -EINVAL; 4801 } 4802 4803 br_dev = __dev_get_by_index(net, br_idx); 4804 if (!br_dev) { 4805 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4806 return -EINVAL; 4807 } 4808 ops = br_dev->netdev_ops; 4809 } 4810 4811 if (dev) { 4812 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4813 if (!netif_is_bridge_port(dev)) { 4814 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4815 return -EINVAL; 4816 } 4817 br_dev = netdev_master_upper_dev_get(dev); 4818 if (!br_dev) { 4819 NL_SET_ERR_MSG(extack, "Master of device not found"); 4820 return -EINVAL; 4821 } 4822 ops = br_dev->netdev_ops; 4823 } else { 4824 if (!(ndm_flags & NTF_SELF)) { 4825 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4826 return -EINVAL; 4827 } 4828 ops = dev->netdev_ops; 4829 } 4830 } 4831 4832 if (!br_dev && !dev) { 4833 NL_SET_ERR_MSG(extack, "No device specified"); 4834 return -ENODEV; 4835 } 4836 4837 if (!ops || !ops->ndo_fdb_get) { 4838 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4839 return -EOPNOTSUPP; 4840 } 4841 4842 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4843 if (!skb) 4844 return -ENOBUFS; 4845 4846 if (br_dev) 4847 dev = br_dev; 4848 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4849 NETLINK_CB(in_skb).portid, 4850 nlh->nlmsg_seq, extack); 4851 if (err) 4852 goto out; 4853 4854 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4855 out: 4856 kfree_skb(skb); 4857 return err; 4858 } 4859 4860 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4861 unsigned int attrnum, unsigned int flag) 
4862 { 4863 if (mask & flag) 4864 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4865 return 0; 4866 } 4867 4868 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4869 struct net_device *dev, u16 mode, 4870 u32 flags, u32 mask, int nlflags, 4871 u32 filter_mask, 4872 int (*vlan_fill)(struct sk_buff *skb, 4873 struct net_device *dev, 4874 u32 filter_mask)) 4875 { 4876 struct nlmsghdr *nlh; 4877 struct ifinfomsg *ifm; 4878 struct nlattr *br_afspec; 4879 struct nlattr *protinfo; 4880 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4881 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4882 int err = 0; 4883 4884 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4885 if (nlh == NULL) 4886 return -EMSGSIZE; 4887 4888 ifm = nlmsg_data(nlh); 4889 ifm->ifi_family = AF_BRIDGE; 4890 ifm->__ifi_pad = 0; 4891 ifm->ifi_type = dev->type; 4892 ifm->ifi_index = dev->ifindex; 4893 ifm->ifi_flags = dev_get_flags(dev); 4894 ifm->ifi_change = 0; 4895 4896 4897 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4898 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4899 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4900 (br_dev && 4901 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4902 (dev->addr_len && 4903 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4904 (dev->ifindex != dev_get_iflink(dev) && 4905 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4906 goto nla_put_failure; 4907 4908 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4909 if (!br_afspec) 4910 goto nla_put_failure; 4911 4912 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4913 nla_nest_cancel(skb, br_afspec); 4914 goto nla_put_failure; 4915 } 4916 4917 if (mode != BRIDGE_MODE_UNDEF) { 4918 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4919 nla_nest_cancel(skb, br_afspec); 4920 goto nla_put_failure; 4921 } 4922 } 4923 if (vlan_fill) { 4924 err = vlan_fill(skb, dev, filter_mask); 4925 if (err) { 4926 nla_nest_cancel(skb, br_afspec); 4927 goto nla_put_failure; 4928 } 4929 } 4930 nla_nest_end(skb, br_afspec); 4931 4932 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4933 if (!protinfo) 4934 goto nla_put_failure; 4935 4936 if (brport_nla_put_flag(skb, flags, mask, 4937 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4938 brport_nla_put_flag(skb, flags, mask, 4939 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4940 brport_nla_put_flag(skb, flags, mask, 4941 IFLA_BRPORT_FAST_LEAVE, 4942 BR_MULTICAST_FAST_LEAVE) || 4943 brport_nla_put_flag(skb, flags, mask, 4944 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4945 brport_nla_put_flag(skb, flags, mask, 4946 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4947 brport_nla_put_flag(skb, flags, mask, 4948 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4949 brport_nla_put_flag(skb, flags, mask, 4950 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4951 brport_nla_put_flag(skb, flags, mask, 4952 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4953 brport_nla_put_flag(skb, flags, mask, 4954 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4955 brport_nla_put_flag(skb, flags, mask, 4956 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4957 nla_nest_cancel(skb, protinfo); 4958 goto nla_put_failure; 4959 } 4960 4961 nla_nest_end(skb, protinfo); 4962 4963 nlmsg_end(skb, nlh); 4964 return 0; 4965 nla_put_failure: 4966 nlmsg_cancel(skb, nlh); 4967 return err ? 
err : -EMSGSIZE; 4968 } 4969 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4970 4971 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4972 bool strict_check, u32 *filter_mask, 4973 struct netlink_ext_ack *extack) 4974 { 4975 struct nlattr *tb[IFLA_MAX+1]; 4976 int err, i; 4977 4978 if (strict_check) { 4979 struct ifinfomsg *ifm; 4980 4981 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4982 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4983 return -EINVAL; 4984 } 4985 4986 ifm = nlmsg_data(nlh); 4987 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4988 ifm->ifi_change || ifm->ifi_index) { 4989 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4990 return -EINVAL; 4991 } 4992 4993 err = nlmsg_parse_deprecated_strict(nlh, 4994 sizeof(struct ifinfomsg), 4995 tb, IFLA_MAX, ifla_policy, 4996 extack); 4997 } else { 4998 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4999 tb, IFLA_MAX, ifla_policy, 5000 extack); 5001 } 5002 if (err < 0) 5003 return err; 5004 5005 /* new attributes should only be added with strict checking */ 5006 for (i = 0; i <= IFLA_MAX; ++i) { 5007 if (!tb[i]) 5008 continue; 5009 5010 switch (i) { 5011 case IFLA_EXT_MASK: 5012 *filter_mask = nla_get_u32(tb[i]); 5013 break; 5014 default: 5015 if (strict_check) { 5016 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 5017 return -EINVAL; 5018 } 5019 } 5020 } 5021 5022 return 0; 5023 } 5024 5025 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5026 { 5027 const struct nlmsghdr *nlh = cb->nlh; 5028 struct net *net = sock_net(skb->sk); 5029 struct net_device *dev; 5030 int idx = 0; 5031 u32 portid = NETLINK_CB(cb->skb).portid; 5032 u32 seq = nlh->nlmsg_seq; 5033 u32 filter_mask = 0; 5034 int err; 5035 5036 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5037 cb->extack); 5038 if (err < 0 && cb->strict_check) 5039 return err; 5040 5041 rcu_read_lock(); 5042 for_each_netdev_rcu(net, dev) { 5043 const struct net_device_ops *ops = dev->netdev_ops; 5044 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5045 5046 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5047 if (idx >= cb->args[0]) { 5048 err = br_dev->netdev_ops->ndo_bridge_getlink( 5049 skb, portid, seq, dev, 5050 filter_mask, NLM_F_MULTI); 5051 if (err < 0 && err != -EOPNOTSUPP) { 5052 if (likely(skb->len)) 5053 break; 5054 5055 goto out_err; 5056 } 5057 } 5058 idx++; 5059 } 5060 5061 if (ops->ndo_bridge_getlink) { 5062 if (idx >= cb->args[0]) { 5063 err = ops->ndo_bridge_getlink(skb, portid, 5064 seq, dev, 5065 filter_mask, 5066 NLM_F_MULTI); 5067 if (err < 0 && err != -EOPNOTSUPP) { 5068 if (likely(skb->len)) 5069 break; 5070 5071 goto out_err; 5072 } 5073 } 5074 idx++; 5075 } 5076 } 5077 err = skb->len; 5078 out_err: 5079 rcu_read_unlock(); 5080 cb->args[0] = idx; 5081 5082 return err; 5083 } 5084 5085 static inline size_t bridge_nlmsg_size(void) 5086 { 5087 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5088 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5089 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5090 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5091 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5092 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5093 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5094 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5095 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5096 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5097 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5098 } 5099 5100 static int rtnl_bridge_notify(struct net_device *dev) 5101 { 5102 struct net *net = dev_net(dev); 5103 struct sk_buff *skb; 5104 int err = -EOPNOTSUPP; 5105 5106 if (!dev->netdev_ops->ndo_bridge_getlink) 5107 return 0; 5108 5109 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5110 if (!skb) { 5111 err = -ENOMEM; 5112 goto errout; 5113 } 5114 5115 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5116 if (err < 0) 5117 goto errout; 5118 5119 /* Notification info is only filled for bridge ports, not the bridge 5120 * device itself. Therefore, a zero notification length is valid and 5121 * should not result in an error. 5122 */ 5123 if (!skb->len) 5124 goto errout; 5125 5126 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5127 return 0; 5128 errout: 5129 WARN_ON(err == -EMSGSIZE); 5130 kfree_skb(skb); 5131 if (err) 5132 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5133 return err; 5134 } 5135 5136 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5137 struct netlink_ext_ack *extack) 5138 { 5139 struct net *net = sock_net(skb->sk); 5140 struct ifinfomsg *ifm; 5141 struct net_device *dev; 5142 struct nlattr *br_spec, *attr = NULL; 5143 int rem, err = -EOPNOTSUPP; 5144 u16 flags = 0; 5145 bool have_flags = false; 5146 5147 if (nlmsg_len(nlh) < sizeof(*ifm)) 5148 return -EINVAL; 5149 5150 ifm = nlmsg_data(nlh); 5151 if (ifm->ifi_family != AF_BRIDGE) 5152 return -EPFNOSUPPORT; 5153 5154 dev = __dev_get_by_index(net, ifm->ifi_index); 5155 if (!dev) { 5156 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5157 return -ENODEV; 5158 } 5159 5160 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5161 if (br_spec) { 5162 nla_for_each_nested(attr, br_spec, rem) { 5163 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) { 5164 if (nla_len(attr) < sizeof(flags)) 5165 return -EINVAL; 5166 5167 have_flags = true; 5168 flags = nla_get_u16(attr); 5169 } 5170 5171 if (nla_type(attr) == IFLA_BRIDGE_MODE) { 5172 if (nla_len(attr) < sizeof(u16)) 5173 return -EINVAL; 5174 } 5175 } 5176 } 5177 5178 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5179 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5180 5181 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5182 err = -EOPNOTSUPP; 5183 goto out; 5184 } 5185 5186 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5187 extack); 5188 if (err) 5189 goto out; 5190 5191 flags &= ~BRIDGE_FLAGS_MASTER; 5192 } 5193 5194 if ((flags & BRIDGE_FLAGS_SELF)) { 5195 if (!dev->netdev_ops->ndo_bridge_setlink) 5196 err = -EOPNOTSUPP; 5197 else 5198 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5199 flags, 5200 extack); 5201 if (!err) { 5202 flags &= ~BRIDGE_FLAGS_SELF; 5203 5204 /* Generate event to notify upper layer of bridge 5205 * change 5206 */ 5207 err = rtnl_bridge_notify(dev); 5208 } 5209 } 5210 5211 if (have_flags) 5212 memcpy(nla_data(attr), &flags, sizeof(flags)); 5213 out: 5214 return err; 5215 } 5216 5217 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5218 struct netlink_ext_ack *extack) 5219 { 5220 struct net *net = sock_net(skb->sk); 5221 struct ifinfomsg *ifm; 5222 struct net_device *dev; 5223 struct nlattr *br_spec, *attr = NULL; 5224 int rem, err = -EOPNOTSUPP; 5225 u16 flags = 0; 5226 bool have_flags = false; 5227 5228 if (nlmsg_len(nlh) < sizeof(*ifm)) 5229 return -EINVAL; 5230 5231 ifm = nlmsg_data(nlh); 5232 if (ifm->ifi_family != AF_BRIDGE) 5233 return -EPFNOSUPPORT; 
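	/* The request is an ifinfomsg with ifi_family == AF_BRIDGE (checked
	 * above) whose bridge-specific part is nested under IFLA_AF_SPEC,
	 * roughly:
	 *
	 *	[ struct ifinfomsg, ifi_family = AF_BRIDGE, ifi_index = N ]
	 *	[ IFLA_AF_SPEC ]
	 *		[ IFLA_BRIDGE_FLAGS ]	u16: BRIDGE_FLAGS_MASTER
	 *					and/or BRIDGE_FLAGS_SELF
	 *
	 * The scan below only extracts IFLA_BRIDGE_FLAGS; everything else in
	 * the nest is left to the ndo_bridge_dellink() implementations.
	 */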
5234 5235 dev = __dev_get_by_index(net, ifm->ifi_index); 5236 if (!dev) { 5237 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5238 return -ENODEV; 5239 } 5240 5241 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5242 if (br_spec) { 5243 nla_for_each_nested(attr, br_spec, rem) { 5244 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5245 if (nla_len(attr) < sizeof(flags)) 5246 return -EINVAL; 5247 5248 have_flags = true; 5249 flags = nla_get_u16(attr); 5250 break; 5251 } 5252 } 5253 } 5254 5255 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5256 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5257 5258 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5259 err = -EOPNOTSUPP; 5260 goto out; 5261 } 5262 5263 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5264 if (err) 5265 goto out; 5266 5267 flags &= ~BRIDGE_FLAGS_MASTER; 5268 } 5269 5270 if ((flags & BRIDGE_FLAGS_SELF)) { 5271 if (!dev->netdev_ops->ndo_bridge_dellink) 5272 err = -EOPNOTSUPP; 5273 else 5274 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5275 flags); 5276 5277 if (!err) { 5278 flags &= ~BRIDGE_FLAGS_SELF; 5279 5280 /* Generate event to notify upper layer of bridge 5281 * change 5282 */ 5283 err = rtnl_bridge_notify(dev); 5284 } 5285 } 5286 5287 if (have_flags) 5288 memcpy(nla_data(attr), &flags, sizeof(flags)); 5289 out: 5290 return err; 5291 } 5292 5293 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5294 { 5295 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5296 (!idxattr || idxattr == attrid); 5297 } 5298 5299 static bool 5300 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5301 { 5302 return dev->netdev_ops && 5303 dev->netdev_ops->ndo_has_offload_stats && 5304 dev->netdev_ops->ndo_get_offload_stats && 5305 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5306 } 5307 5308 static unsigned int 5309 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5310 { 5311 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5312 sizeof(struct rtnl_link_stats64) : 0; 5313 } 5314 5315 static int 5316 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5317 struct sk_buff *skb) 5318 { 5319 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5320 struct nlattr *attr = NULL; 5321 void *attr_data; 5322 int err; 5323 5324 if (!size) 5325 return -ENODATA; 5326 5327 attr = nla_reserve_64bit(skb, attr_id, size, 5328 IFLA_OFFLOAD_XSTATS_UNSPEC); 5329 if (!attr) 5330 return -EMSGSIZE; 5331 5332 attr_data = nla_data(attr); 5333 memset(attr_data, 0, size); 5334 5335 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5336 if (err) 5337 return err; 5338 5339 return 0; 5340 } 5341 5342 static unsigned int 5343 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5344 enum netdev_offload_xstats_type type) 5345 { 5346 bool enabled = netdev_offload_xstats_enabled(dev, type); 5347 5348 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5349 } 5350 5351 struct rtnl_offload_xstats_request_used { 5352 bool request; 5353 bool used; 5354 }; 5355 5356 static int 5357 rtnl_offload_xstats_get_stats(struct net_device *dev, 5358 enum netdev_offload_xstats_type type, 5359 struct rtnl_offload_xstats_request_used *ru, 5360 struct rtnl_hw_stats64 *stats, 5361 struct netlink_ext_ack *extack) 5362 { 5363 bool request; 5364 bool used; 5365 int err; 5366 5367 request = netdev_offload_xstats_enabled(dev, type); 5368 if (!request) { 5369 used = false; 5370 goto out; 5371 } 5372 5373 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5374 if (err) 5375 return err; 5376 5377 out: 5378 if (ru) { 5379 ru->request = request; 5380 ru->used = used; 5381 } 5382 return 0; 5383 } 5384 5385 static int 5386 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5387 struct rtnl_offload_xstats_request_used *ru) 5388 { 5389 struct nlattr *nest; 5390 5391 nest = nla_nest_start(skb, attr_id); 5392 if (!nest) 5393 return -EMSGSIZE; 5394 5395 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5396 goto nla_put_failure; 5397 5398 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5399 goto nla_put_failure; 5400 5401 nla_nest_end(skb, nest); 5402 return 0; 5403 5404 nla_put_failure: 5405 nla_nest_cancel(skb, nest); 5406 return -EMSGSIZE; 5407 } 5408 5409 static int 5410 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5411 struct netlink_ext_ack *extack) 5412 { 5413 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5414 struct rtnl_offload_xstats_request_used ru_l3; 5415 struct nlattr *nest; 5416 int err; 5417 5418 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5419 if (err) 5420 return err; 5421 5422 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5423 if (!nest) 5424 return -EMSGSIZE; 5425 5426 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5427 IFLA_OFFLOAD_XSTATS_L3_STATS, 5428 &ru_l3)) 5429 goto nla_put_failure; 5430 5431 nla_nest_end(skb, nest); 5432 return 0; 5433 5434 nla_put_failure: 5435 nla_nest_cancel(skb, nest); 5436 return -EMSGSIZE; 5437 } 5438 5439 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5440 int *prividx, u32 off_filter_mask, 5441 struct netlink_ext_ack *extack) 5442 { 5443 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5444 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5445 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5446 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5447 bool have_data = false; 5448 int err; 5449 5450 if (*prividx <= attr_id_cpu_hit && 5451 (off_filter_mask & 5452 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5453 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5454 if (!err) { 5455 have_data = true; 5456 } else if (err != -ENODATA) { 5457 *prividx = attr_id_cpu_hit; 5458 return err; 5459 } 5460 } 5461 5462 if (*prividx <= attr_id_hw_s_info && 5463 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5464 *prividx = attr_id_hw_s_info; 5465 5466 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5467 if (err) 5468 return err; 5469 5470 have_data = true; 5471 *prividx = 0; 5472 } 5473 5474 if (*prividx <= attr_id_l3_stats && 5475 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5476 unsigned int size_l3; 5477 struct nlattr *attr; 5478 5479 *prividx = attr_id_l3_stats; 5480 5481 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5482 if (!size_l3) 5483 goto skip_l3_stats; 5484 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5485 IFLA_OFFLOAD_XSTATS_UNSPEC); 5486 if (!attr) 5487 return -EMSGSIZE; 5488 5489 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5490 nla_data(attr), extack); 5491 if (err) 5492 return err; 5493 5494 have_data = true; 5495 skip_l3_stats: 5496 *prividx = 0; 5497 } 5498 5499 if (!have_data) 5500 return -ENODATA; 5501 5502 *prividx = 0; 5503 return 0; 5504 } 5505 5506 static unsigned int 5507 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5508 enum netdev_offload_xstats_type type) 5509 { 5510 return nla_total_size(0) + 5511 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5512 nla_total_size(sizeof(u8)) + 5513 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5514 nla_total_size(sizeof(u8)) + 5515 0; 5516 } 5517 5518 static unsigned int 5519 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5520 { 5521 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5522 5523 return nla_total_size(0) + 5524 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5525 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5526 0; 5527 } 5528 5529 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5530 u32 off_filter_mask) 5531 { 5532 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5533 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5534 int nla_size = 0; 5535 int size; 5536 5537 if (off_filter_mask & 5538 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5539 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5540 nla_size += nla_total_size_64bit(size); 5541 } 5542 5543 if (off_filter_mask & 5544 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5545 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5546 5547 if (off_filter_mask & 5548 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5549 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5550 nla_size += nla_total_size_64bit(size); 5551 } 5552 5553 if (nla_size != 0) 5554 nla_size += nla_total_size(0); 5555 5556 return nla_size; 5557 } 5558 5559 struct rtnl_stats_dump_filters { 5560 /* mask[0] filters outer attributes. Then individual nests have their 5561 * filtering mask at the index of the nested attribute. 
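	 *
	 * For example, requesting only the HW_S_INFO nest inside
	 * IFLA_STATS_LINK_OFFLOAD_XSTATS (the same shape that
	 * rtnl_offload_xstats_notify() builds below) looks like:
	 *
	 *	struct rtnl_stats_dump_filters filters = {};
	 *
	 *	filters.mask[0] |=
	 *		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	 *	filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
	 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);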
5562 */ 5563 u32 mask[IFLA_STATS_MAX + 1]; 5564 }; 5565 5566 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5567 int type, u32 pid, u32 seq, u32 change, 5568 unsigned int flags, 5569 const struct rtnl_stats_dump_filters *filters, 5570 int *idxattr, int *prividx, 5571 struct netlink_ext_ack *extack) 5572 { 5573 unsigned int filter_mask = filters->mask[0]; 5574 struct if_stats_msg *ifsm; 5575 struct nlmsghdr *nlh; 5576 struct nlattr *attr; 5577 int s_prividx = *prividx; 5578 int err; 5579 5580 ASSERT_RTNL(); 5581 5582 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5583 if (!nlh) 5584 return -EMSGSIZE; 5585 5586 ifsm = nlmsg_data(nlh); 5587 ifsm->family = PF_UNSPEC; 5588 ifsm->pad1 = 0; 5589 ifsm->pad2 = 0; 5590 ifsm->ifindex = dev->ifindex; 5591 ifsm->filter_mask = filter_mask; 5592 5593 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5594 struct rtnl_link_stats64 *sp; 5595 5596 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5597 sizeof(struct rtnl_link_stats64), 5598 IFLA_STATS_UNSPEC); 5599 if (!attr) { 5600 err = -EMSGSIZE; 5601 goto nla_put_failure; 5602 } 5603 5604 sp = nla_data(attr); 5605 dev_get_stats(dev, sp); 5606 } 5607 5608 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5609 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5610 5611 if (ops && ops->fill_linkxstats) { 5612 *idxattr = IFLA_STATS_LINK_XSTATS; 5613 attr = nla_nest_start_noflag(skb, 5614 IFLA_STATS_LINK_XSTATS); 5615 if (!attr) { 5616 err = -EMSGSIZE; 5617 goto nla_put_failure; 5618 } 5619 5620 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5621 nla_nest_end(skb, attr); 5622 if (err) 5623 goto nla_put_failure; 5624 *idxattr = 0; 5625 } 5626 } 5627 5628 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5629 *idxattr)) { 5630 const struct rtnl_link_ops *ops = NULL; 5631 const struct net_device *master; 5632 5633 master = netdev_master_upper_dev_get(dev); 5634 if (master) 5635 ops = master->rtnl_link_ops; 5636 if (ops && ops->fill_linkxstats) { 5637 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5638 attr = nla_nest_start_noflag(skb, 5639 IFLA_STATS_LINK_XSTATS_SLAVE); 5640 if (!attr) { 5641 err = -EMSGSIZE; 5642 goto nla_put_failure; 5643 } 5644 5645 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5646 nla_nest_end(skb, attr); 5647 if (err) 5648 goto nla_put_failure; 5649 *idxattr = 0; 5650 } 5651 } 5652 5653 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5654 *idxattr)) { 5655 u32 off_filter_mask; 5656 5657 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5658 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5659 attr = nla_nest_start_noflag(skb, 5660 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5661 if (!attr) { 5662 err = -EMSGSIZE; 5663 goto nla_put_failure; 5664 } 5665 5666 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5667 off_filter_mask, extack); 5668 if (err == -ENODATA) 5669 nla_nest_cancel(skb, attr); 5670 else 5671 nla_nest_end(skb, attr); 5672 5673 if (err && err != -ENODATA) 5674 goto nla_put_failure; 5675 *idxattr = 0; 5676 } 5677 5678 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5679 struct rtnl_af_ops *af_ops; 5680 5681 *idxattr = IFLA_STATS_AF_SPEC; 5682 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5683 if (!attr) { 5684 err = -EMSGSIZE; 5685 goto nla_put_failure; 5686 } 5687 5688 rcu_read_lock(); 5689 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5690 if (af_ops->fill_stats_af) { 5691 struct nlattr *af; 5692 5693 af = 
nla_nest_start_noflag(skb, 5694 af_ops->family); 5695 if (!af) { 5696 rcu_read_unlock(); 5697 err = -EMSGSIZE; 5698 goto nla_put_failure; 5699 } 5700 err = af_ops->fill_stats_af(skb, dev); 5701 5702 if (err == -ENODATA) { 5703 nla_nest_cancel(skb, af); 5704 } else if (err < 0) { 5705 rcu_read_unlock(); 5706 goto nla_put_failure; 5707 } 5708 5709 nla_nest_end(skb, af); 5710 } 5711 } 5712 rcu_read_unlock(); 5713 5714 nla_nest_end(skb, attr); 5715 5716 *idxattr = 0; 5717 } 5718 5719 nlmsg_end(skb, nlh); 5720 5721 return 0; 5722 5723 nla_put_failure: 5724 /* not a multi message or no progress mean a real error */ 5725 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5726 nlmsg_cancel(skb, nlh); 5727 else 5728 nlmsg_end(skb, nlh); 5729 5730 return err; 5731 } 5732 5733 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5734 const struct rtnl_stats_dump_filters *filters) 5735 { 5736 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5737 unsigned int filter_mask = filters->mask[0]; 5738 5739 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5740 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5741 5742 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5743 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5744 int attr = IFLA_STATS_LINK_XSTATS; 5745 5746 if (ops && ops->get_linkxstats_size) { 5747 size += nla_total_size(ops->get_linkxstats_size(dev, 5748 attr)); 5749 /* for IFLA_STATS_LINK_XSTATS */ 5750 size += nla_total_size(0); 5751 } 5752 } 5753 5754 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5755 struct net_device *_dev = (struct net_device *)dev; 5756 const struct rtnl_link_ops *ops = NULL; 5757 const struct net_device *master; 5758 5759 /* netdev_master_upper_dev_get can't take const */ 5760 master = netdev_master_upper_dev_get(_dev); 5761 if (master) 5762 ops = master->rtnl_link_ops; 5763 if (ops && ops->get_linkxstats_size) { 5764 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5765 5766 size += nla_total_size(ops->get_linkxstats_size(dev, 5767 attr)); 5768 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5769 size += nla_total_size(0); 5770 } 5771 } 5772 5773 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5774 u32 off_filter_mask; 5775 5776 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5777 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5778 } 5779 5780 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5781 struct rtnl_af_ops *af_ops; 5782 5783 /* for IFLA_STATS_AF_SPEC */ 5784 size += nla_total_size(0); 5785 5786 rcu_read_lock(); 5787 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5788 if (af_ops->get_stats_af_size) { 5789 size += nla_total_size( 5790 af_ops->get_stats_af_size(dev)); 5791 5792 /* for AF_* */ 5793 size += nla_total_size(0); 5794 } 5795 } 5796 rcu_read_unlock(); 5797 } 5798 5799 return size; 5800 } 5801 5802 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5803 5804 static const struct nla_policy 5805 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5806 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5807 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5808 }; 5809 5810 static const struct nla_policy 5811 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5812 [IFLA_STATS_GET_FILTERS] = 5813 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5814 }; 5815 5816 static const struct nla_policy 5817 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5818 
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5819 }; 5820 5821 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5822 struct rtnl_stats_dump_filters *filters, 5823 struct netlink_ext_ack *extack) 5824 { 5825 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5826 int err; 5827 int at; 5828 5829 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5830 rtnl_stats_get_policy_filters, extack); 5831 if (err < 0) 5832 return err; 5833 5834 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5835 if (tb[at]) { 5836 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5837 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5838 return -EINVAL; 5839 } 5840 filters->mask[at] = nla_get_u32(tb[at]); 5841 } 5842 } 5843 5844 return 0; 5845 } 5846 5847 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5848 u32 filter_mask, 5849 struct rtnl_stats_dump_filters *filters, 5850 struct netlink_ext_ack *extack) 5851 { 5852 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5853 int err; 5854 int i; 5855 5856 filters->mask[0] = filter_mask; 5857 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5858 filters->mask[i] = -1U; 5859 5860 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5861 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5862 if (err < 0) 5863 return err; 5864 5865 if (tb[IFLA_STATS_GET_FILTERS]) { 5866 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5867 filters, extack); 5868 if (err) 5869 return err; 5870 } 5871 5872 return 0; 5873 } 5874 5875 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5876 bool is_dump, struct netlink_ext_ack *extack) 5877 { 5878 struct if_stats_msg *ifsm; 5879 5880 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5881 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5882 return -EINVAL; 5883 } 5884 5885 if (!strict_check) 5886 return 0; 5887 5888 ifsm = nlmsg_data(nlh); 5889 5890 /* only requests using strict checks can pass data to influence 5891 * the dump. The legacy exception is filter_mask. 
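	 *
	 * A conforming strict request header therefore looks like this
	 * (sketch of the userspace payload; the pad fields stay zero and,
	 * for dumps, so does ifindex):
	 *
	 *	struct if_stats_msg ifsm = {
	 *		.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
	 *	};
	 *
	 * Nonzero pads, a nonzero dump ifindex, and unknown filter_mask bits
	 * are all rejected just below.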
5892 */ 5893 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5894 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5895 return -EINVAL; 5896 } 5897 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5898 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5899 return -EINVAL; 5900 } 5901 5902 return 0; 5903 } 5904 5905 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5906 struct netlink_ext_ack *extack) 5907 { 5908 struct rtnl_stats_dump_filters filters; 5909 struct net *net = sock_net(skb->sk); 5910 struct net_device *dev = NULL; 5911 int idxattr = 0, prividx = 0; 5912 struct if_stats_msg *ifsm; 5913 struct sk_buff *nskb; 5914 int err; 5915 5916 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5917 false, extack); 5918 if (err) 5919 return err; 5920 5921 ifsm = nlmsg_data(nlh); 5922 if (ifsm->ifindex > 0) 5923 dev = __dev_get_by_index(net, ifsm->ifindex); 5924 else 5925 return -EINVAL; 5926 5927 if (!dev) 5928 return -ENODEV; 5929 5930 if (!ifsm->filter_mask) { 5931 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5932 return -EINVAL; 5933 } 5934 5935 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5936 if (err) 5937 return err; 5938 5939 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5940 if (!nskb) 5941 return -ENOBUFS; 5942 5943 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5944 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5945 0, &filters, &idxattr, &prividx, extack); 5946 if (err < 0) { 5947 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5948 WARN_ON(err == -EMSGSIZE); 5949 kfree_skb(nskb); 5950 } else { 5951 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5952 } 5953 5954 return err; 5955 } 5956 5957 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5958 { 5959 struct netlink_ext_ack *extack = cb->extack; 5960 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5961 struct rtnl_stats_dump_filters filters; 5962 struct net *net = sock_net(skb->sk); 5963 unsigned int flags = NLM_F_MULTI; 5964 struct if_stats_msg *ifsm; 5965 struct hlist_head *head; 5966 struct net_device *dev; 5967 int idx = 0; 5968 5969 s_h = cb->args[0]; 5970 s_idx = cb->args[1]; 5971 s_idxattr = cb->args[2]; 5972 s_prividx = cb->args[3]; 5973 5974 cb->seq = net->dev_base_seq; 5975 5976 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5977 if (err) 5978 return err; 5979 5980 ifsm = nlmsg_data(cb->nlh); 5981 if (!ifsm->filter_mask) { 5982 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5983 return -EINVAL; 5984 } 5985 5986 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5987 extack); 5988 if (err) 5989 return err; 5990 5991 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5992 idx = 0; 5993 head = &net->dev_index_head[h]; 5994 hlist_for_each_entry(dev, head, index_hlist) { 5995 if (idx < s_idx) 5996 goto cont; 5997 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5998 NETLINK_CB(cb->skb).portid, 5999 cb->nlh->nlmsg_seq, 0, 6000 flags, &filters, 6001 &s_idxattr, &s_prividx, 6002 extack); 6003 /* If we ran out of room on the first message, 6004 * we're in trouble 6005 */ 6006 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 6007 6008 if (err < 0) 6009 goto out; 6010 s_prividx = 0; 6011 s_idxattr = 0; 6012 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 6013 cont: 6014 idx++; 6015 } 6016 } 6017 out: 6018 cb->args[3] = s_prividx; 6019 cb->args[2] = s_idxattr; 
6020 cb->args[1] = idx; 6021 cb->args[0] = h; 6022 6023 return skb->len; 6024 } 6025 6026 void rtnl_offload_xstats_notify(struct net_device *dev) 6027 { 6028 struct rtnl_stats_dump_filters response_filters = {}; 6029 struct net *net = dev_net(dev); 6030 int idxattr = 0, prividx = 0; 6031 struct sk_buff *skb; 6032 int err = -ENOBUFS; 6033 6034 ASSERT_RTNL(); 6035 6036 response_filters.mask[0] |= 6037 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6038 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6039 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6040 6041 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6042 GFP_KERNEL); 6043 if (!skb) 6044 goto errout; 6045 6046 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6047 &response_filters, &idxattr, &prividx, NULL); 6048 if (err < 0) { 6049 kfree_skb(skb); 6050 goto errout; 6051 } 6052 6053 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6054 return; 6055 6056 errout: 6057 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6058 } 6059 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6060 6061 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6062 struct netlink_ext_ack *extack) 6063 { 6064 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6065 struct rtnl_stats_dump_filters response_filters = {}; 6066 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6067 struct net *net = sock_net(skb->sk); 6068 struct net_device *dev = NULL; 6069 struct if_stats_msg *ifsm; 6070 bool notify = false; 6071 int err; 6072 6073 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6074 false, extack); 6075 if (err) 6076 return err; 6077 6078 ifsm = nlmsg_data(nlh); 6079 if (ifsm->family != AF_UNSPEC) { 6080 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6081 return -EINVAL; 6082 } 6083 6084 if (ifsm->ifindex > 0) 6085 dev = __dev_get_by_index(net, ifsm->ifindex); 6086 else 6087 return -EINVAL; 6088 6089 if (!dev) 6090 return -ENODEV; 6091 6092 if (ifsm->filter_mask) { 6093 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6094 return -EINVAL; 6095 } 6096 6097 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6098 ifla_stats_set_policy, extack); 6099 if (err < 0) 6100 return err; 6101 6102 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6103 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6104 6105 if (req) 6106 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6107 else 6108 err = netdev_offload_xstats_disable(dev, t_l3); 6109 6110 if (!err) 6111 notify = true; 6112 else if (err != -EALREADY) 6113 return err; 6114 6115 response_filters.mask[0] |= 6116 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6117 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6118 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6119 } 6120 6121 if (notify) 6122 rtnl_offload_xstats_notify(dev); 6123 6124 return 0; 6125 } 6126 6127 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6128 struct netlink_ext_ack *extack) 6129 { 6130 struct br_port_msg *bpm; 6131 6132 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6133 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6134 return -EINVAL; 6135 } 6136 6137 bpm = nlmsg_data(nlh); 6138 if (bpm->ifindex) { 6139 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request"); 6140 return -EINVAL; 6141 } 6142 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6143 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump 
request"); 6144 return -EINVAL; 6145 } 6146 6147 return 0; 6148 } 6149 6150 struct rtnl_mdb_dump_ctx { 6151 long idx; 6152 }; 6153 6154 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6155 { 6156 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6157 struct net *net = sock_net(skb->sk); 6158 struct net_device *dev; 6159 int idx, s_idx; 6160 int err; 6161 6162 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6163 6164 if (cb->strict_check) { 6165 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6166 if (err) 6167 return err; 6168 } 6169 6170 s_idx = ctx->idx; 6171 idx = 0; 6172 6173 for_each_netdev(net, dev) { 6174 if (idx < s_idx) 6175 goto skip; 6176 if (!dev->netdev_ops->ndo_mdb_dump) 6177 goto skip; 6178 6179 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6180 if (err == -EMSGSIZE) 6181 goto out; 6182 /* Moving on to next device, reset markers and sequence 6183 * counters since they are all maintained per-device. 6184 */ 6185 memset(cb->ctx, 0, sizeof(cb->ctx)); 6186 cb->prev_seq = 0; 6187 cb->seq = 0; 6188 skip: 6189 idx++; 6190 } 6191 6192 out: 6193 ctx->idx = idx; 6194 return skb->len; 6195 } 6196 6197 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr, 6198 struct netlink_ext_ack *extack) 6199 { 6200 struct br_mdb_entry *entry = nla_data(attr); 6201 6202 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6203 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6204 return -EINVAL; 6205 } 6206 6207 if (entry->ifindex) { 6208 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified"); 6209 return -EINVAL; 6210 } 6211 6212 if (entry->state) { 6213 NL_SET_ERR_MSG(extack, "Entry state cannot be specified"); 6214 return -EINVAL; 6215 } 6216 6217 if (entry->flags) { 6218 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified"); 6219 return -EINVAL; 6220 } 6221 6222 if (entry->vid >= VLAN_VID_MASK) { 6223 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6224 return -EINVAL; 6225 } 6226 6227 if (entry->addr.proto != htons(ETH_P_IP) && 6228 entry->addr.proto != htons(ETH_P_IPV6) && 6229 entry->addr.proto != 0) { 6230 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6231 return -EINVAL; 6232 } 6233 6234 return 0; 6235 } 6236 6237 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = { 6238 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6239 rtnl_validate_mdb_entry_get, 6240 sizeof(struct br_mdb_entry)), 6241 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6242 }; 6243 6244 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 6245 struct netlink_ext_ack *extack) 6246 { 6247 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1]; 6248 struct net *net = sock_net(in_skb->sk); 6249 struct br_port_msg *bpm; 6250 struct net_device *dev; 6251 int err; 6252 6253 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb, 6254 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack); 6255 if (err) 6256 return err; 6257 6258 bpm = nlmsg_data(nlh); 6259 if (!bpm->ifindex) { 6260 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6261 return -EINVAL; 6262 } 6263 6264 dev = __dev_get_by_index(net, bpm->ifindex); 6265 if (!dev) { 6266 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6267 return -ENODEV; 6268 } 6269 6270 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) { 6271 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute"); 6272 return -EINVAL; 6273 } 6274 6275 if (!dev->netdev_ops->ndo_mdb_get) { 6276 NL_SET_ERR_MSG(extack, "Device does not support MDB operations"); 6277 return -EOPNOTSUPP; 6278 } 6279 6280 return 
static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}

static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
					    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);
	struct br_mdb_entry zero_entry = {};

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_N_VID - 1) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
		return -EINVAL;
	}

	return 0;
}
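
/*
 * Note on the bulk-delete validator above, contrasting it with
 * rtnl_validate_mdb_entry(): a bulk request describes a set of entries
 * to flush, not a single entry, so the group address must be left
 * zeroed while state and VLAN id are still accepted. How those fields
 * narrow the flush is up to the driver's ndo_mdb_del_bulk()
 * implementation; the bridge driver, for instance, treats them as
 * match criteria together with the attributes nested under
 * MDBA_SET_ENTRY_ATTRS.
 */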
static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}
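
/*
 * Dispatch notes for rtnetlink_rcv_msg() below, summarizing the code
 * rather than adding behavior: handlers are looked up in the
 * per-family table first and fall back to PF_UNSPEC; GET requests
 * carrying NLM_F_DUMP are handed to netlink_dump_start() with the
 * handler's module pinned for the duration of the dump; everything
 * else runs the doit hook, under rtnl_mutex unless the handler was
 * registered with RTNL_FLAG_DOIT_UNLOCKED. Non-GET requests
 * additionally require CAP_NET_ADMIN, checked via
 * netlink_net_capable().
 */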
/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
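
/*
 * Illustrative sketch (not part of this file): how a module would plug
 * a handler into the dispatch path above. The "foo" names and the
 * RTM_GETFOO message type are purely hypothetical placeholders;
 * rtnl_register_module() is the exported entry point for this table
 * (see its kernel-doc earlier in this file).
 *
 *	static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		// runs without rtnl_mutex held, because the registration
 *		// below passes RTNL_FLAG_DOIT_UNLOCKED
 *		return 0;
 *	}
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETFOO,
 *				   foo_doit, NULL, RTNL_FLAG_DOIT_UNLOCKED);
 */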
6547 */ 6548 module_put(owner); 6549 } 6550 return err; 6551 } 6552 6553 link = rtnl_get_link(family, type); 6554 if (!link || !link->doit) { 6555 family = PF_UNSPEC; 6556 link = rtnl_get_link(PF_UNSPEC, type); 6557 if (!link || !link->doit) 6558 goto out_unlock; 6559 } 6560 6561 owner = link->owner; 6562 if (!try_module_get(owner)) { 6563 err = -EPROTONOSUPPORT; 6564 goto out_unlock; 6565 } 6566 6567 flags = link->flags; 6568 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && 6569 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { 6570 NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); 6571 module_put(owner); 6572 goto err_unlock; 6573 } 6574 6575 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 6576 doit = link->doit; 6577 rcu_read_unlock(); 6578 if (doit) 6579 err = doit(skb, nlh, extack); 6580 module_put(owner); 6581 return err; 6582 } 6583 rcu_read_unlock(); 6584 6585 rtnl_lock(); 6586 link = rtnl_get_link(family, type); 6587 if (link && link->doit) 6588 err = link->doit(skb, nlh, extack); 6589 rtnl_unlock(); 6590 6591 module_put(owner); 6592 6593 return err; 6594 6595 out_unlock: 6596 rcu_read_unlock(); 6597 return err; 6598 6599 err_unlock: 6600 rcu_read_unlock(); 6601 return -EOPNOTSUPP; 6602 } 6603 6604 static void rtnetlink_rcv(struct sk_buff *skb) 6605 { 6606 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 6607 } 6608 6609 static int rtnetlink_bind(struct net *net, int group) 6610 { 6611 switch (group) { 6612 case RTNLGRP_IPV4_MROUTE_R: 6613 case RTNLGRP_IPV6_MROUTE_R: 6614 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 6615 return -EPERM; 6616 break; 6617 } 6618 return 0; 6619 } 6620 6621 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 6622 { 6623 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6624 6625 switch (event) { 6626 case NETDEV_REBOOT: 6627 case NETDEV_CHANGEMTU: 6628 case NETDEV_CHANGEADDR: 6629 case NETDEV_CHANGENAME: 6630 case NETDEV_FEAT_CHANGE: 6631 case NETDEV_BONDING_FAILOVER: 6632 case NETDEV_POST_TYPE_CHANGE: 6633 case NETDEV_NOTIFY_PEERS: 6634 case NETDEV_CHANGEUPPER: 6635 case NETDEV_RESEND_IGMP: 6636 case NETDEV_CHANGEINFODATA: 6637 case NETDEV_CHANGELOWERSTATE: 6638 case NETDEV_CHANGE_TX_QUEUE_LEN: 6639 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 6640 GFP_KERNEL, NULL, 0, 0, NULL); 6641 break; 6642 default: 6643 break; 6644 } 6645 return NOTIFY_DONE; 6646 } 6647 6648 static struct notifier_block rtnetlink_dev_notifier = { 6649 .notifier_call = rtnetlink_event, 6650 }; 6651 6652 6653 static int __net_init rtnetlink_net_init(struct net *net) 6654 { 6655 struct sock *sk; 6656 struct netlink_kernel_cfg cfg = { 6657 .groups = RTNLGRP_MAX, 6658 .input = rtnetlink_rcv, 6659 .cb_mutex = &rtnl_mutex, 6660 .flags = NL_CFG_F_NONROOT_RECV, 6661 .bind = rtnetlink_bind, 6662 }; 6663 6664 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 6665 if (!sk) 6666 return -ENOMEM; 6667 net->rtnl = sk; 6668 return 0; 6669 } 6670 6671 static void __net_exit rtnetlink_net_exit(struct net *net) 6672 { 6673 netlink_kernel_release(net->rtnl); 6674 net->rtnl = NULL; 6675 } 6676 6677 static struct pernet_operations rtnetlink_net_ops = { 6678 .init = rtnetlink_net_init, 6679 .exit = rtnetlink_net_exit, 6680 }; 6681 6682 void __init rtnetlink_init(void) 6683 { 6684 if (register_pernet_subsys(&rtnetlink_net_ops)) 6685 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 6686 6687 register_netdevice_notifier(&rtnetlink_dev_notifier); 6688 6689 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 6690 rtnl_dump_ifinfo, 0); 
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}