// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	42

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
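
/* Usage sketch (illustrative, not part of the original file): a caller
 * already running under the RTNL can hand over a linked chain of skbs and
 * let the actual kfree_skb() calls happen once the lock is dropped:
 *
 *	rtnl_lock();
 *	...
 *	rtnl_kfree_skbs(head, tail);	(head..tail linked via skb->next)
 *	rtnl_unlock();			(deferred skbs are freed here)
 *
 * This keeps potentially long free loops out of the locked section.
 */
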
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *			  rtnl_lock()
	 *			  unregister_netdevice()
	 *			  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 * // list not empty now
	 * // because of thread 2
	 *				  rtnl_lock()
	 *				  while (!list_empty(...))
	 *				    rtnl_lock()
	 *				    wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
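
/* Typical locking pattern (sketch; some_device_mutation() is hypothetical):
 *
 *	rtnl_lock();
 *	err = some_device_mutation(dev);
 *	rtnl_unlock();
 *
 * Code that only reads RTNL-protected pointers can instead rely on
 * rtnl_dereference()/rcu_dereference_rtnl(), which use
 * lockdep_rtnl_is_held() to verify that the lock is really held.
 */
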
static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
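
/* Example (sketch): a module wiring up a handler for a hypothetical
 * RTM_GETFOO message with a hypothetical foo_doit() callback:
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETFOO,
 *				   foo_doit, NULL, 0);
 *	if (err)
 *		return err;
 *	...
 *	rtnl_unregister(PF_UNSPEC, RTM_GETFOO);		(on module exit)
 */
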
/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
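
/* Example (sketch): a virtual-device driver registering its link ops at
 * module init; the "hypothetical" names are placeholders:
 *
 *	static struct rtnl_link_ops hypothetical_link_ops __read_mostly = {
 *		.kind	= "hypothetical",
 *		.setup	= hypothetical_setup,
 *	};
 *
 *	err = rtnl_link_register(&hypothetical_link_ops);	(module init)
 *	...
 *	rtnl_link_unregister(&hypothetical_link_ops);		(module exit)
 *
 * Because .setup is set and .dellink is not, __rtnl_link_register() above
 * falls back to unregister_netdevice_queue for deletion.
 */
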
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) +	/* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);	/* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
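
/* Example (sketch): an address family publishing per-link attributes,
 * with hypothetical fill/size callbacks:
 *
 *	static struct rtnl_af_ops hypothetical_af_ops = {
 *		.family		  = AF_INET6,
 *		.fill_link_af	  = hypothetical_fill_link_af,
 *		.get_link_af_size = hypothetical_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&hypothetical_af_ops);
 *
 * The callbacks are then invoked from rtnl_link_get_af_size() and
 * rtnl_fill_link_af() below whenever IFLA_AF_SPEC is dumped.
 */
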
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
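
/* Resulting attribute layout (sketch; "vlan"/"bond" are just examples):
 *
 *	IFLA_LINKINFO
 *	  IFLA_INFO_KIND	"vlan"
 *	  IFLA_INFO_DATA	nest filled by ops->fill_info()
 *	  IFLA_INFO_SLAVE_KIND	"bond" (only if dev has a master)
 *	  IFLA_INFO_SLAVE_DATA	nest filled by the master's ops->fill_slave_info()
 */
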
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
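
/* Example: a route with an MTU metric is encoded as
 *
 *	RTA_METRICS (nest)
 *	  RTAX_MTU (u32)	e.g. 1500
 *
 * since the attribute type used above is the metric index i + 1, which by
 * construction equals the corresponding RTAX_* constant.
 */
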
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
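
/* Example: for a device with IFF_UP|IFF_BROADCAST set, a request with
 * ifi_flags = 0 and ifi_change = IFF_UP yields IFF_BROADCAST only, i.e.
 * just IFF_UP is cleared. With ifi_change == 0, all flag bits are taken
 * from ifi_flags, preserving the historical behaviour.
 */
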
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0);	/* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ)	/* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ)	/* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN)	/* IFLA_BROADCAST */
	       + nla_total_size(4)	/* IFLA_TXQLEN */
	       + nla_total_size(4)	/* IFLA_WEIGHT */
	       + nla_total_size(4)	/* IFLA_MTU */
	       + nla_total_size(4)	/* IFLA_LINK */
	       + nla_total_size(4)	/* IFLA_MASTER */
	       + nla_total_size(1)	/* IFLA_CARRIER */
	       + nla_total_size(4)	/* IFLA_PROMISCUITY */
	       + nla_total_size(4)	/* IFLA_ALLMULTI */
	       + nla_total_size(4)	/* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4)	/* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4)	/* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4)	/* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4)	/* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4)	/* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4)	/* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4)	/* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4)	/* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1)	/* IFLA_OPERSTATE */
	       + nla_total_size(1)	/* IFLA_LINKMODE */
	       + nla_total_size(4)	/* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4)	/* IFLA_LINK_NETNSID */
	       + nla_total_size(4)	/* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	/* the legacy 32-bit counters are a truncating copy of the 64-bit stats */
	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}
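
/* Reporting semantics: rtnl_xdp_report_one() emits one IFLA_XDP_<mode>_PROG_ID
 * attribute per attached program. With a single program attached,
 * IFLA_XDP_ATTACHED carries its mode and IFLA_XDP_PROG_ID duplicates the id;
 * with programs attached in several modes, the mode reported is
 * XDP_ATTACHED_MULTI and only the per-mode attributes identify the programs.
 */
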
static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
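
/* Note: every attribute emitted by rtnl_fill_ifinfo() and its helpers must be
 * accounted for in if_nlmsg_size() above, otherwise the skb allocated for a
 * link notification can turn out too small and the fill fails with -EMSGSIZE.
 */
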
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
	[IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
	[IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};
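
/* Example (sketch): validating a received message against ifla_policy,
 * assuming @nlh and @extack are at hand:
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
 *				     IFLA_MAX, ifla_policy, extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[IFLA_MTU])
 *		mtu = nla_get_u32(tb[IFLA_MTU]);
 *
 * NLA_REJECT entries (e.g. IFLA_PERM_ADDRESS) mark attributes that are
 * output-only and must never be set by user space.
 */
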
struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1990 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1991 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 1992 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 1993 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 1994 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 1995 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 1996 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 1997 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 1998 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 1999 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2000 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2001 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2002 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2003 }; 2004 2005 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2006 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2007 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2008 .len = PORT_PROFILE_MAX }, 2009 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2010 .len = PORT_UUID_MAX }, 2011 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2012 .len = PORT_UUID_MAX }, 2013 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2014 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2015 2016 /* Unused, but we need to keep it here since user space could 2017 * fill it. It's also broken with regard to NLA_BINARY use in 2018 * combination with structs. 2019 */ 2020 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2021 .len = sizeof(struct ifla_port_vsi) }, 2022 }; 2023 2024 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2025 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2026 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2027 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2028 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2029 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2030 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2031 }; 2032 2033 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2034 { 2035 const struct rtnl_link_ops *ops = NULL; 2036 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2037 2038 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2039 return NULL; 2040 2041 if (linfo[IFLA_INFO_KIND]) { 2042 char kind[MODULE_NAME_LEN]; 2043 2044 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2045 ops = rtnl_link_ops_get(kind); 2046 } 2047 2048 return ops; 2049 } 2050 2051 static bool link_master_filtered(struct net_device *dev, int master_idx) 2052 { 2053 struct net_device *master; 2054 2055 if (!master_idx) 2056 return false; 2057 2058 master = netdev_master_upper_dev_get(dev); 2059 2060 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2061 * another invalid value for ifindex to denote "no master". 
2062 */
2063 if (master_idx == -1)
2064 return !!master;
2065
2066 if (!master || master->ifindex != master_idx)
2067 return true;
2068
2069 return false;
2070 }
2071
2072 static bool link_kind_filtered(const struct net_device *dev,
2073 const struct rtnl_link_ops *kind_ops)
2074 {
2075 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2076 return true;
2077
2078 return false;
2079 }
2080
2081 static bool link_dump_filtered(struct net_device *dev,
2082 int master_idx,
2083 const struct rtnl_link_ops *kind_ops)
2084 {
2085 if (link_master_filtered(dev, master_idx) ||
2086 link_kind_filtered(dev, kind_ops))
2087 return true;
2088
2089 return false;
2090 }
2091
2092 /**
2093 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2094 * @sk: netlink socket
2095 * @netnsid: network namespace identifier
2096 *
2097 * Returns the network namespace identified by netnsid on success or an error
2098 * pointer on failure.
2099 */
2100 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2101 {
2102 struct net *net;
2103
2104 net = get_net_ns_by_id(sock_net(sk), netnsid);
2105 if (!net)
2106 return ERR_PTR(-EINVAL);
2107
2108 /* For now, the caller is required to have CAP_NET_ADMIN in
2109 * the user namespace owning the target net ns.
2110 */
2111 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2112 put_net(net);
2113 return ERR_PTR(-EACCES);
2114 }
2115 return net;
2116 }
2117 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2118
2119 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2120 bool strict_check, struct nlattr **tb,
2121 struct netlink_ext_ack *extack)
2122 {
2123 int hdrlen;
2124
2125 if (strict_check) {
2126 struct ifinfomsg *ifm;
2127
2128 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2129 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2130 return -EINVAL;
2131 }
2132
2133 ifm = nlmsg_data(nlh);
2134 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2135 ifm->ifi_change) {
2136 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2137 return -EINVAL;
2138 }
2139 if (ifm->ifi_index) {
2140 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2141 return -EINVAL;
2142 }
2143
2144 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2145 IFLA_MAX, ifla_policy,
2146 extack);
2147 }
2148
2149 /* A hack to preserve the kernel<->userspace interface.
2150 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2151 * However, before Linux v3.9 the code here assumed rtgenmsg, and that's
2152 * what iproute2 < v3.9.0 used.
2153 * We can detect the old iproute2 because, even including the IFLA_EXT_MASK
2154 * attribute, its netlink message is shorter than struct ifinfomsg.
2155 */
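/* Illustration (added; struct layouts taken from the uapi headers):
 * the two header formats being distinguished. The legacy header is
 *
 *	struct rtgenmsg {
 *		unsigned char rtgen_family;
 *	};
 *
 * which occupies 4 bytes of payload once attributes are aligned, while
 * the correct header
 *
 *	struct ifinfomsg {
 *		unsigned char	ifi_family;
 *		unsigned char	__ifi_pad;
 *		unsigned short	ifi_type;
 *		int		ifi_index;
 *		unsigned int	ifi_flags;
 *		unsigned int	ifi_change;
 *	};
 *
 * occupies 16. A legacy request carrying an IFLA_EXT_MASK attribute
 * (4-byte nlattr header + u32) is therefore 4 + 8 = 12 bytes of
 * payload, still shorter than sizeof(struct ifinfomsg).
 */
2156 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?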
2157 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2158 2159 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2160 extack); 2161 } 2162 2163 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2164 { 2165 struct netlink_ext_ack *extack = cb->extack; 2166 const struct nlmsghdr *nlh = cb->nlh; 2167 struct net *net = sock_net(skb->sk); 2168 struct net *tgt_net = net; 2169 int h, s_h; 2170 int idx = 0, s_idx; 2171 struct net_device *dev; 2172 struct hlist_head *head; 2173 struct nlattr *tb[IFLA_MAX+1]; 2174 u32 ext_filter_mask = 0; 2175 const struct rtnl_link_ops *kind_ops = NULL; 2176 unsigned int flags = NLM_F_MULTI; 2177 int master_idx = 0; 2178 int netnsid = -1; 2179 int err, i; 2180 2181 s_h = cb->args[0]; 2182 s_idx = cb->args[1]; 2183 2184 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2185 if (err < 0) { 2186 if (cb->strict_check) 2187 return err; 2188 2189 goto walk_entries; 2190 } 2191 2192 for (i = 0; i <= IFLA_MAX; ++i) { 2193 if (!tb[i]) 2194 continue; 2195 2196 /* new attributes should only be added with strict checking */ 2197 switch (i) { 2198 case IFLA_TARGET_NETNSID: 2199 netnsid = nla_get_s32(tb[i]); 2200 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2201 if (IS_ERR(tgt_net)) { 2202 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2203 return PTR_ERR(tgt_net); 2204 } 2205 break; 2206 case IFLA_EXT_MASK: 2207 ext_filter_mask = nla_get_u32(tb[i]); 2208 break; 2209 case IFLA_MASTER: 2210 master_idx = nla_get_u32(tb[i]); 2211 break; 2212 case IFLA_LINKINFO: 2213 kind_ops = linkinfo_to_kind_ops(tb[i]); 2214 break; 2215 default: 2216 if (cb->strict_check) { 2217 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2218 return -EINVAL; 2219 } 2220 } 2221 } 2222 2223 if (master_idx || kind_ops) 2224 flags |= NLM_F_DUMP_FILTERED; 2225 2226 walk_entries: 2227 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2228 idx = 0; 2229 head = &tgt_net->dev_index_head[h]; 2230 hlist_for_each_entry(dev, head, index_hlist) { 2231 if (link_dump_filtered(dev, master_idx, kind_ops)) 2232 goto cont; 2233 if (idx < s_idx) 2234 goto cont; 2235 err = rtnl_fill_ifinfo(skb, dev, net, 2236 RTM_NEWLINK, 2237 NETLINK_CB(cb->skb).portid, 2238 nlh->nlmsg_seq, 0, flags, 2239 ext_filter_mask, 0, NULL, 0, 2240 netnsid, GFP_KERNEL); 2241 2242 if (err < 0) { 2243 if (likely(skb->len)) 2244 goto out; 2245 2246 goto out_err; 2247 } 2248 cont: 2249 idx++; 2250 } 2251 } 2252 out: 2253 err = skb->len; 2254 out_err: 2255 cb->args[1] = idx; 2256 cb->args[0] = h; 2257 cb->seq = tgt_net->dev_base_seq; 2258 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2259 if (netnsid >= 0) 2260 put_net(tgt_net); 2261 2262 return err; 2263 } 2264 2265 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2266 struct netlink_ext_ack *exterr) 2267 { 2268 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2269 exterr); 2270 } 2271 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2272 2273 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2274 { 2275 struct net *net; 2276 /* Examine the link attributes and figure out which 2277 * network namespace we are talking about. 
2278 */ 2279 if (tb[IFLA_NET_NS_PID]) 2280 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2281 else if (tb[IFLA_NET_NS_FD]) 2282 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2283 else 2284 net = get_net(src_net); 2285 return net; 2286 } 2287 EXPORT_SYMBOL(rtnl_link_get_net); 2288 2289 /* Figure out which network namespace we are talking about by 2290 * examining the link attributes in the following order: 2291 * 2292 * 1. IFLA_NET_NS_PID 2293 * 2. IFLA_NET_NS_FD 2294 * 3. IFLA_TARGET_NETNSID 2295 */ 2296 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2297 struct nlattr *tb[]) 2298 { 2299 struct net *net; 2300 2301 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2302 return rtnl_link_get_net(src_net, tb); 2303 2304 if (!tb[IFLA_TARGET_NETNSID]) 2305 return get_net(src_net); 2306 2307 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2308 if (!net) 2309 return ERR_PTR(-EINVAL); 2310 2311 return net; 2312 } 2313 2314 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2315 struct net *src_net, 2316 struct nlattr *tb[], int cap) 2317 { 2318 struct net *net; 2319 2320 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2321 if (IS_ERR(net)) 2322 return net; 2323 2324 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2325 put_net(net); 2326 return ERR_PTR(-EPERM); 2327 } 2328 2329 return net; 2330 } 2331 2332 /* Verify that rtnetlink requests do not pass additional properties 2333 * potentially referring to different network namespaces. 2334 */ 2335 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2336 struct netlink_ext_ack *extack, 2337 bool netns_id_only) 2338 { 2339 2340 if (netns_id_only) { 2341 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2342 return 0; 2343 2344 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2345 return -EOPNOTSUPP; 2346 } 2347 2348 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2349 goto invalid_attr; 2350 2351 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2352 goto invalid_attr; 2353 2354 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2355 goto invalid_attr; 2356 2357 return 0; 2358 2359 invalid_attr: 2360 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2361 return -EINVAL; 2362 } 2363 2364 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2365 int max_tx_rate) 2366 { 2367 const struct net_device_ops *ops = dev->netdev_ops; 2368 2369 if (!ops->ndo_set_vf_rate) 2370 return -EOPNOTSUPP; 2371 if (max_tx_rate && max_tx_rate < min_tx_rate) 2372 return -EINVAL; 2373 2374 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2375 } 2376 2377 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2378 struct netlink_ext_ack *extack) 2379 { 2380 if (dev) { 2381 if (tb[IFLA_ADDRESS] && 2382 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2383 return -EINVAL; 2384 2385 if (tb[IFLA_BROADCAST] && 2386 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2387 return -EINVAL; 2388 } 2389 2390 if (tb[IFLA_AF_SPEC]) { 2391 struct nlattr *af; 2392 int rem, err; 2393 2394 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2395 const struct rtnl_af_ops *af_ops; 2396 2397 af_ops = rtnl_af_lookup(nla_type(af)); 2398 if (!af_ops) 2399 return -EAFNOSUPPORT; 2400 2401 if (!af_ops->set_link_af) 2402 return -EOPNOTSUPP; 2403 2404 if (af_ops->validate_link_af) { 2405 err = af_ops->validate_link_af(dev, af, extack); 2406 if (err < 0) 
2407 return err; 2408 } 2409 } 2410 } 2411 2412 return 0; 2413 } 2414 2415 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2416 int guid_type) 2417 { 2418 const struct net_device_ops *ops = dev->netdev_ops; 2419 2420 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2421 } 2422 2423 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2424 { 2425 if (dev->type != ARPHRD_INFINIBAND) 2426 return -EOPNOTSUPP; 2427 2428 return handle_infiniband_guid(dev, ivt, guid_type); 2429 } 2430 2431 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2432 { 2433 const struct net_device_ops *ops = dev->netdev_ops; 2434 int err = -EINVAL; 2435 2436 if (tb[IFLA_VF_MAC]) { 2437 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2438 2439 if (ivm->vf >= INT_MAX) 2440 return -EINVAL; 2441 err = -EOPNOTSUPP; 2442 if (ops->ndo_set_vf_mac) 2443 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2444 ivm->mac); 2445 if (err < 0) 2446 return err; 2447 } 2448 2449 if (tb[IFLA_VF_VLAN]) { 2450 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2451 2452 if (ivv->vf >= INT_MAX) 2453 return -EINVAL; 2454 err = -EOPNOTSUPP; 2455 if (ops->ndo_set_vf_vlan) 2456 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2457 ivv->qos, 2458 htons(ETH_P_8021Q)); 2459 if (err < 0) 2460 return err; 2461 } 2462 2463 if (tb[IFLA_VF_VLAN_LIST]) { 2464 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2465 struct nlattr *attr; 2466 int rem, len = 0; 2467 2468 err = -EOPNOTSUPP; 2469 if (!ops->ndo_set_vf_vlan) 2470 return err; 2471 2472 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2473 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2474 nla_len(attr) < NLA_HDRLEN) { 2475 return -EINVAL; 2476 } 2477 if (len >= MAX_VLAN_LIST_LEN) 2478 return -EOPNOTSUPP; 2479 ivvl[len] = nla_data(attr); 2480 2481 len++; 2482 } 2483 if (len == 0) 2484 return -EINVAL; 2485 2486 if (ivvl[0]->vf >= INT_MAX) 2487 return -EINVAL; 2488 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2489 ivvl[0]->qos, ivvl[0]->vlan_proto); 2490 if (err < 0) 2491 return err; 2492 } 2493 2494 if (tb[IFLA_VF_TX_RATE]) { 2495 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2496 struct ifla_vf_info ivf; 2497 2498 if (ivt->vf >= INT_MAX) 2499 return -EINVAL; 2500 err = -EOPNOTSUPP; 2501 if (ops->ndo_get_vf_config) 2502 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2503 if (err < 0) 2504 return err; 2505 2506 err = rtnl_set_vf_rate(dev, ivt->vf, 2507 ivf.min_tx_rate, ivt->rate); 2508 if (err < 0) 2509 return err; 2510 } 2511 2512 if (tb[IFLA_VF_RATE]) { 2513 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2514 2515 if (ivt->vf >= INT_MAX) 2516 return -EINVAL; 2517 2518 err = rtnl_set_vf_rate(dev, ivt->vf, 2519 ivt->min_tx_rate, ivt->max_tx_rate); 2520 if (err < 0) 2521 return err; 2522 } 2523 2524 if (tb[IFLA_VF_SPOOFCHK]) { 2525 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2526 2527 if (ivs->vf >= INT_MAX) 2528 return -EINVAL; 2529 err = -EOPNOTSUPP; 2530 if (ops->ndo_set_vf_spoofchk) 2531 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2532 ivs->setting); 2533 if (err < 0) 2534 return err; 2535 } 2536 2537 if (tb[IFLA_VF_LINK_STATE]) { 2538 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2539 2540 if (ivl->vf >= INT_MAX) 2541 return -EINVAL; 2542 err = -EOPNOTSUPP; 2543 if (ops->ndo_set_vf_link_state) 2544 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2545 ivl->link_state); 2546 if (err < 0) 2547 return err; 
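/* Illustrative note (added): each IFLA_VF_* attribute handled in this
 * function carries a fixed binary struct whose size was already
 * enforced by the .len entries in ifla_vf_policy, e.g. from the uapi:
 *
 *	struct ifla_vf_link_state {
 *		__u32 vf;
 *		__u32 link_state;	IFLA_VF_LINK_STATE_AUTO,
 *					IFLA_VF_LINK_STATE_ENABLE or
 *					IFLA_VF_LINK_STATE_DISABLE
 *	};
 *
 * so nla_data() can be cast directly without further length checks;
 * only the vf index still needs the explicit INT_MAX bound applied in
 * each branch.
 */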
2548 } 2549 2550 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2551 struct ifla_vf_rss_query_en *ivrssq_en; 2552 2553 err = -EOPNOTSUPP; 2554 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2555 if (ivrssq_en->vf >= INT_MAX) 2556 return -EINVAL; 2557 if (ops->ndo_set_vf_rss_query_en) 2558 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2559 ivrssq_en->setting); 2560 if (err < 0) 2561 return err; 2562 } 2563 2564 if (tb[IFLA_VF_TRUST]) { 2565 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2566 2567 if (ivt->vf >= INT_MAX) 2568 return -EINVAL; 2569 err = -EOPNOTSUPP; 2570 if (ops->ndo_set_vf_trust) 2571 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2572 if (err < 0) 2573 return err; 2574 } 2575 2576 if (tb[IFLA_VF_IB_NODE_GUID]) { 2577 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2578 2579 if (ivt->vf >= INT_MAX) 2580 return -EINVAL; 2581 if (!ops->ndo_set_vf_guid) 2582 return -EOPNOTSUPP; 2583 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2584 } 2585 2586 if (tb[IFLA_VF_IB_PORT_GUID]) { 2587 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2588 2589 if (ivt->vf >= INT_MAX) 2590 return -EINVAL; 2591 if (!ops->ndo_set_vf_guid) 2592 return -EOPNOTSUPP; 2593 2594 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2595 } 2596 2597 return err; 2598 } 2599 2600 static int do_set_master(struct net_device *dev, int ifindex, 2601 struct netlink_ext_ack *extack) 2602 { 2603 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2604 const struct net_device_ops *ops; 2605 int err; 2606 2607 if (upper_dev) { 2608 if (upper_dev->ifindex == ifindex) 2609 return 0; 2610 ops = upper_dev->netdev_ops; 2611 if (ops->ndo_del_slave) { 2612 err = ops->ndo_del_slave(upper_dev, dev); 2613 if (err) 2614 return err; 2615 } else { 2616 return -EOPNOTSUPP; 2617 } 2618 } 2619 2620 if (ifindex) { 2621 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2622 if (!upper_dev) 2623 return -EINVAL; 2624 ops = upper_dev->netdev_ops; 2625 if (ops->ndo_add_slave) { 2626 err = ops->ndo_add_slave(upper_dev, dev, extack); 2627 if (err) 2628 return err; 2629 } else { 2630 return -EOPNOTSUPP; 2631 } 2632 } 2633 return 0; 2634 } 2635 2636 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2637 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2638 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2639 }; 2640 2641 static int do_set_proto_down(struct net_device *dev, 2642 struct nlattr *nl_proto_down, 2643 struct nlattr *nl_proto_down_reason, 2644 struct netlink_ext_ack *extack) 2645 { 2646 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2647 unsigned long mask = 0; 2648 u32 value; 2649 bool proto_down; 2650 int err; 2651 2652 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2653 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2654 return -EOPNOTSUPP; 2655 } 2656 2657 if (nl_proto_down_reason) { 2658 err = nla_parse_nested_deprecated(pdreason, 2659 IFLA_PROTO_DOWN_REASON_MAX, 2660 nl_proto_down_reason, 2661 ifla_proto_down_reason_policy, 2662 NULL); 2663 if (err < 0) 2664 return err; 2665 2666 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2667 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2668 return -EINVAL; 2669 } 2670 2671 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2672 2673 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2674 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2675 2676 dev_change_proto_down_reason(dev, mask, value); 2677 } 2678 2679 
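/* Example (added sketch): with the reason nest parsed above, user
 * space can flip individual reason bits without disturbing the others.
 * To set bit 0 and clear bit 1 while leaving the rest of
 * dev->proto_down_reason intact:
 *
 *	IFLA_PROTO_DOWN_REASON (nested)
 *	    IFLA_PROTO_DOWN_REASON_MASK  = 0x3
 *	    IFLA_PROTO_DOWN_REASON_VALUE = 0x1
 *
 * dev_change_proto_down_reason() applies value under mask; when the
 * mask attribute is absent (mask == 0), the whole reason word is
 * replaced by value.
 */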
if (nl_proto_down) { 2680 proto_down = nla_get_u8(nl_proto_down); 2681 2682 /* Don't turn off protodown if there are active reasons */ 2683 if (!proto_down && dev->proto_down_reason) { 2684 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2685 return -EBUSY; 2686 } 2687 err = dev_change_proto_down(dev, 2688 proto_down); 2689 if (err) 2690 return err; 2691 } 2692 2693 return 0; 2694 } 2695 2696 #define DO_SETLINK_MODIFIED 0x01 2697 /* notify flag means notify + modified. */ 2698 #define DO_SETLINK_NOTIFY 0x03 2699 static int do_setlink(const struct sk_buff *skb, 2700 struct net_device *dev, struct ifinfomsg *ifm, 2701 struct netlink_ext_ack *extack, 2702 struct nlattr **tb, int status) 2703 { 2704 const struct net_device_ops *ops = dev->netdev_ops; 2705 char ifname[IFNAMSIZ]; 2706 int err; 2707 2708 err = validate_linkmsg(dev, tb, extack); 2709 if (err < 0) 2710 return err; 2711 2712 if (tb[IFLA_IFNAME]) 2713 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2714 else 2715 ifname[0] = '\0'; 2716 2717 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2718 const char *pat = ifname[0] ? ifname : NULL; 2719 struct net *net; 2720 int new_ifindex; 2721 2722 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2723 tb, CAP_NET_ADMIN); 2724 if (IS_ERR(net)) { 2725 err = PTR_ERR(net); 2726 goto errout; 2727 } 2728 2729 if (tb[IFLA_NEW_IFINDEX]) 2730 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2731 else 2732 new_ifindex = 0; 2733 2734 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2735 put_net(net); 2736 if (err) 2737 goto errout; 2738 status |= DO_SETLINK_MODIFIED; 2739 } 2740 2741 if (tb[IFLA_MAP]) { 2742 struct rtnl_link_ifmap *u_map; 2743 struct ifmap k_map; 2744 2745 if (!ops->ndo_set_config) { 2746 err = -EOPNOTSUPP; 2747 goto errout; 2748 } 2749 2750 if (!netif_device_present(dev)) { 2751 err = -ENODEV; 2752 goto errout; 2753 } 2754 2755 u_map = nla_data(tb[IFLA_MAP]); 2756 k_map.mem_start = (unsigned long) u_map->mem_start; 2757 k_map.mem_end = (unsigned long) u_map->mem_end; 2758 k_map.base_addr = (unsigned short) u_map->base_addr; 2759 k_map.irq = (unsigned char) u_map->irq; 2760 k_map.dma = (unsigned char) u_map->dma; 2761 k_map.port = (unsigned char) u_map->port; 2762 2763 err = ops->ndo_set_config(dev, &k_map); 2764 if (err < 0) 2765 goto errout; 2766 2767 status |= DO_SETLINK_NOTIFY; 2768 } 2769 2770 if (tb[IFLA_ADDRESS]) { 2771 struct sockaddr *sa; 2772 int len; 2773 2774 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2775 sizeof(*sa)); 2776 sa = kmalloc(len, GFP_KERNEL); 2777 if (!sa) { 2778 err = -ENOMEM; 2779 goto errout; 2780 } 2781 sa->sa_family = dev->type; 2782 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2783 dev->addr_len); 2784 err = dev_set_mac_address_user(dev, sa, extack); 2785 kfree(sa); 2786 if (err) 2787 goto errout; 2788 status |= DO_SETLINK_MODIFIED; 2789 } 2790 2791 if (tb[IFLA_MTU]) { 2792 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2793 if (err < 0) 2794 goto errout; 2795 status |= DO_SETLINK_MODIFIED; 2796 } 2797 2798 if (tb[IFLA_GROUP]) { 2799 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2800 status |= DO_SETLINK_NOTIFY; 2801 } 2802 2803 /* 2804 * Interface selected by interface index but interface 2805 * name provided implies that a name change has been 2806 * requested. 
2807 */ 2808 if (ifm->ifi_index > 0 && ifname[0]) { 2809 err = dev_change_name(dev, ifname); 2810 if (err < 0) 2811 goto errout; 2812 status |= DO_SETLINK_MODIFIED; 2813 } 2814 2815 if (tb[IFLA_IFALIAS]) { 2816 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2817 nla_len(tb[IFLA_IFALIAS])); 2818 if (err < 0) 2819 goto errout; 2820 status |= DO_SETLINK_NOTIFY; 2821 } 2822 2823 if (tb[IFLA_BROADCAST]) { 2824 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2825 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2826 } 2827 2828 if (tb[IFLA_MASTER]) { 2829 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2830 if (err) 2831 goto errout; 2832 status |= DO_SETLINK_MODIFIED; 2833 } 2834 2835 if (ifm->ifi_flags || ifm->ifi_change) { 2836 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2837 extack); 2838 if (err < 0) 2839 goto errout; 2840 } 2841 2842 if (tb[IFLA_CARRIER]) { 2843 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2844 if (err) 2845 goto errout; 2846 status |= DO_SETLINK_MODIFIED; 2847 } 2848 2849 if (tb[IFLA_TXQLEN]) { 2850 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2851 2852 err = dev_change_tx_queue_len(dev, value); 2853 if (err) 2854 goto errout; 2855 status |= DO_SETLINK_MODIFIED; 2856 } 2857 2858 if (tb[IFLA_GSO_MAX_SIZE]) { 2859 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2860 2861 if (max_size > dev->tso_max_size) { 2862 err = -EINVAL; 2863 goto errout; 2864 } 2865 2866 if (dev->gso_max_size ^ max_size) { 2867 netif_set_gso_max_size(dev, max_size); 2868 status |= DO_SETLINK_MODIFIED; 2869 } 2870 } 2871 2872 if (tb[IFLA_GSO_MAX_SEGS]) { 2873 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2874 2875 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) { 2876 err = -EINVAL; 2877 goto errout; 2878 } 2879 2880 if (dev->gso_max_segs ^ max_segs) { 2881 netif_set_gso_max_segs(dev, max_segs); 2882 status |= DO_SETLINK_MODIFIED; 2883 } 2884 } 2885 2886 if (tb[IFLA_GRO_MAX_SIZE]) { 2887 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2888 2889 if (dev->gro_max_size ^ gro_max_size) { 2890 netif_set_gro_max_size(dev, gro_max_size); 2891 status |= DO_SETLINK_MODIFIED; 2892 } 2893 } 2894 2895 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 2896 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 2897 2898 if (max_size > dev->tso_max_size) { 2899 err = -EINVAL; 2900 goto errout; 2901 } 2902 2903 if (dev->gso_ipv4_max_size ^ max_size) { 2904 netif_set_gso_ipv4_max_size(dev, max_size); 2905 status |= DO_SETLINK_MODIFIED; 2906 } 2907 } 2908 2909 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 2910 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 2911 2912 if (dev->gro_ipv4_max_size ^ gro_max_size) { 2913 netif_set_gro_ipv4_max_size(dev, gro_max_size); 2914 status |= DO_SETLINK_MODIFIED; 2915 } 2916 } 2917 2918 if (tb[IFLA_OPERSTATE]) 2919 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2920 2921 if (tb[IFLA_LINKMODE]) { 2922 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2923 2924 write_lock(&dev_base_lock); 2925 if (dev->link_mode ^ value) 2926 status |= DO_SETLINK_NOTIFY; 2927 dev->link_mode = value; 2928 write_unlock(&dev_base_lock); 2929 } 2930 2931 if (tb[IFLA_VFINFO_LIST]) { 2932 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2933 struct nlattr *attr; 2934 int rem; 2935 2936 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2937 if (nla_type(attr) != IFLA_VF_INFO || 2938 nla_len(attr) < NLA_HDRLEN) { 2939 err = -EINVAL; 2940 goto errout; 2941 } 2942 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 
2943 attr, 2944 ifla_vf_policy, 2945 NULL); 2946 if (err < 0) 2947 goto errout; 2948 err = do_setvfinfo(dev, vfinfo); 2949 if (err < 0) 2950 goto errout; 2951 status |= DO_SETLINK_NOTIFY; 2952 } 2953 } 2954 err = 0; 2955 2956 if (tb[IFLA_VF_PORTS]) { 2957 struct nlattr *port[IFLA_PORT_MAX+1]; 2958 struct nlattr *attr; 2959 int vf; 2960 int rem; 2961 2962 err = -EOPNOTSUPP; 2963 if (!ops->ndo_set_vf_port) 2964 goto errout; 2965 2966 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2967 if (nla_type(attr) != IFLA_VF_PORT || 2968 nla_len(attr) < NLA_HDRLEN) { 2969 err = -EINVAL; 2970 goto errout; 2971 } 2972 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2973 attr, 2974 ifla_port_policy, 2975 NULL); 2976 if (err < 0) 2977 goto errout; 2978 if (!port[IFLA_PORT_VF]) { 2979 err = -EOPNOTSUPP; 2980 goto errout; 2981 } 2982 vf = nla_get_u32(port[IFLA_PORT_VF]); 2983 err = ops->ndo_set_vf_port(dev, vf, port); 2984 if (err < 0) 2985 goto errout; 2986 status |= DO_SETLINK_NOTIFY; 2987 } 2988 } 2989 err = 0; 2990 2991 if (tb[IFLA_PORT_SELF]) { 2992 struct nlattr *port[IFLA_PORT_MAX+1]; 2993 2994 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2995 tb[IFLA_PORT_SELF], 2996 ifla_port_policy, NULL); 2997 if (err < 0) 2998 goto errout; 2999 3000 err = -EOPNOTSUPP; 3001 if (ops->ndo_set_vf_port) 3002 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3003 if (err < 0) 3004 goto errout; 3005 status |= DO_SETLINK_NOTIFY; 3006 } 3007 3008 if (tb[IFLA_AF_SPEC]) { 3009 struct nlattr *af; 3010 int rem; 3011 3012 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3013 const struct rtnl_af_ops *af_ops; 3014 3015 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3016 3017 err = af_ops->set_link_af(dev, af, extack); 3018 if (err < 0) 3019 goto errout; 3020 3021 status |= DO_SETLINK_NOTIFY; 3022 } 3023 } 3024 err = 0; 3025 3026 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3027 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3028 tb[IFLA_PROTO_DOWN_REASON], extack); 3029 if (err) 3030 goto errout; 3031 status |= DO_SETLINK_NOTIFY; 3032 } 3033 3034 if (tb[IFLA_XDP]) { 3035 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3036 u32 xdp_flags = 0; 3037 3038 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3039 tb[IFLA_XDP], 3040 ifla_xdp_policy, NULL); 3041 if (err < 0) 3042 goto errout; 3043 3044 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3045 err = -EINVAL; 3046 goto errout; 3047 } 3048 3049 if (xdp[IFLA_XDP_FLAGS]) { 3050 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3051 if (xdp_flags & ~XDP_FLAGS_MASK) { 3052 err = -EINVAL; 3053 goto errout; 3054 } 3055 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3056 err = -EINVAL; 3057 goto errout; 3058 } 3059 } 3060 3061 if (xdp[IFLA_XDP_FD]) { 3062 int expected_fd = -1; 3063 3064 if (xdp_flags & XDP_FLAGS_REPLACE) { 3065 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3066 err = -EINVAL; 3067 goto errout; 3068 } 3069 expected_fd = 3070 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3071 } 3072 3073 err = dev_change_xdp_fd(dev, extack, 3074 nla_get_s32(xdp[IFLA_XDP_FD]), 3075 expected_fd, 3076 xdp_flags); 3077 if (err) 3078 goto errout; 3079 status |= DO_SETLINK_NOTIFY; 3080 } 3081 } 3082 3083 errout: 3084 if (status & DO_SETLINK_MODIFIED) { 3085 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3086 netdev_state_change(dev); 3087 3088 if (err < 0) 3089 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3090 dev->name); 3091 } 3092 3093 return err; 3094 } 3095 3096 static struct net_device *rtnl_dev_get(struct net *net, 3097 struct nlattr *tb[]) 3098 { 3099 char ifname[ALTIFNAMSIZ]; 3100 3101 if (tb[IFLA_IFNAME]) 3102 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3103 else if (tb[IFLA_ALT_IFNAME]) 3104 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3105 else 3106 return NULL; 3107 3108 return __dev_get_by_name(net, ifname); 3109 } 3110 3111 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3112 struct netlink_ext_ack *extack) 3113 { 3114 struct net *net = sock_net(skb->sk); 3115 struct ifinfomsg *ifm; 3116 struct net_device *dev; 3117 int err; 3118 struct nlattr *tb[IFLA_MAX+1]; 3119 3120 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3121 ifla_policy, extack); 3122 if (err < 0) 3123 goto errout; 3124 3125 err = rtnl_ensure_unique_netns(tb, extack, false); 3126 if (err < 0) 3127 goto errout; 3128 3129 err = -EINVAL; 3130 ifm = nlmsg_data(nlh); 3131 if (ifm->ifi_index > 0) 3132 dev = __dev_get_by_index(net, ifm->ifi_index); 3133 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3134 dev = rtnl_dev_get(net, tb); 3135 else 3136 goto errout; 3137 3138 if (dev == NULL) { 3139 err = -ENODEV; 3140 goto errout; 3141 } 3142 3143 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3144 errout: 3145 return err; 3146 } 3147 3148 static int rtnl_group_dellink(const struct net *net, int group) 3149 { 3150 struct net_device *dev, *aux; 3151 LIST_HEAD(list_kill); 3152 bool found = false; 3153 3154 if (!group) 3155 return -EPERM; 3156 3157 for_each_netdev(net, dev) { 3158 if (dev->group == group) { 3159 const struct rtnl_link_ops *ops; 3160 3161 found = true; 3162 ops = dev->rtnl_link_ops; 3163 if (!ops || !ops->dellink) 3164 return -EOPNOTSUPP; 3165 } 3166 } 3167 3168 if (!found) 3169 return -ENODEV; 3170 3171 for_each_netdev_safe(net, dev, aux) { 3172 if (dev->group == group) { 3173 const struct rtnl_link_ops *ops; 3174 3175 ops = dev->rtnl_link_ops; 3176 ops->dellink(dev, &list_kill); 3177 } 3178 } 3179 unregister_netdevice_many(&list_kill); 3180 3181 return 0; 3182 } 3183 3184 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3185 { 3186 const struct rtnl_link_ops *ops; 3187 LIST_HEAD(list_kill); 3188 3189 ops = dev->rtnl_link_ops; 3190 if (!ops || !ops->dellink) 3191 return -EOPNOTSUPP; 3192 3193 ops->dellink(dev, &list_kill); 3194 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3195 3196 return 0; 3197 } 3198 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3199 3200 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3201 struct netlink_ext_ack *extack) 3202 { 3203 struct net *net = sock_net(skb->sk); 3204 u32 portid = NETLINK_CB(skb).portid; 3205 struct net *tgt_net = net; 3206 struct net_device *dev = NULL; 3207 struct ifinfomsg *ifm; 3208 struct nlattr *tb[IFLA_MAX+1]; 3209 int err; 3210 int netnsid = -1; 3211 3212 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3213 ifla_policy, extack); 3214 if (err < 0) 3215 return err; 3216 3217 err = rtnl_ensure_unique_netns(tb, extack, true); 3218 if (err < 0) 3219 return err; 3220 3221 if (tb[IFLA_TARGET_NETNSID]) { 3222 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3223 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3224 if (IS_ERR(tgt_net)) 3225 return PTR_ERR(tgt_net); 3226 } 3227 3228 err = -EINVAL; 3229 ifm = nlmsg_data(nlh); 3230 if (ifm->ifi_index 
> 0) 3231 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3232 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3233 dev = rtnl_dev_get(net, tb); 3234 else if (tb[IFLA_GROUP]) 3235 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3236 else 3237 goto out; 3238 3239 if (!dev) { 3240 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3241 err = -ENODEV; 3242 3243 goto out; 3244 } 3245 3246 err = rtnl_delete_link(dev, portid, nlh); 3247 3248 out: 3249 if (netnsid >= 0) 3250 put_net(tgt_net); 3251 3252 return err; 3253 } 3254 3255 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3256 u32 portid, const struct nlmsghdr *nlh) 3257 { 3258 unsigned int old_flags; 3259 int err; 3260 3261 old_flags = dev->flags; 3262 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3263 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3264 NULL); 3265 if (err < 0) 3266 return err; 3267 } 3268 3269 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3270 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3271 } else { 3272 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3273 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3274 } 3275 return 0; 3276 } 3277 EXPORT_SYMBOL(rtnl_configure_link); 3278 3279 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3280 unsigned char name_assign_type, 3281 const struct rtnl_link_ops *ops, 3282 struct nlattr *tb[], 3283 struct netlink_ext_ack *extack) 3284 { 3285 struct net_device *dev; 3286 unsigned int num_tx_queues = 1; 3287 unsigned int num_rx_queues = 1; 3288 3289 if (tb[IFLA_NUM_TX_QUEUES]) 3290 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3291 else if (ops->get_num_tx_queues) 3292 num_tx_queues = ops->get_num_tx_queues(); 3293 3294 if (tb[IFLA_NUM_RX_QUEUES]) 3295 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3296 else if (ops->get_num_rx_queues) 3297 num_rx_queues = ops->get_num_rx_queues(); 3298 3299 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3300 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3301 return ERR_PTR(-EINVAL); 3302 } 3303 3304 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3305 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3306 return ERR_PTR(-EINVAL); 3307 } 3308 3309 if (ops->alloc) { 3310 dev = ops->alloc(tb, ifname, name_assign_type, 3311 num_tx_queues, num_rx_queues); 3312 if (IS_ERR(dev)) 3313 return dev; 3314 } else { 3315 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3316 name_assign_type, ops->setup, 3317 num_tx_queues, num_rx_queues); 3318 } 3319 3320 if (!dev) 3321 return ERR_PTR(-ENOMEM); 3322 3323 dev_net_set(dev, net); 3324 dev->rtnl_link_ops = ops; 3325 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3326 3327 if (tb[IFLA_MTU]) { 3328 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3329 int err; 3330 3331 err = dev_validate_mtu(dev, mtu, extack); 3332 if (err) { 3333 free_netdev(dev); 3334 return ERR_PTR(err); 3335 } 3336 dev->mtu = mtu; 3337 } 3338 if (tb[IFLA_ADDRESS]) { 3339 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3340 nla_len(tb[IFLA_ADDRESS])); 3341 dev->addr_assign_type = NET_ADDR_SET; 3342 } 3343 if (tb[IFLA_BROADCAST]) 3344 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3345 nla_len(tb[IFLA_BROADCAST])); 3346 if (tb[IFLA_TXQLEN]) 3347 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3348 if (tb[IFLA_OPERSTATE]) 3349 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3350 if (tb[IFLA_LINKMODE]) 3351 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3352 if 
(tb[IFLA_GROUP]) 3353 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3354 if (tb[IFLA_GSO_MAX_SIZE]) 3355 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3356 if (tb[IFLA_GSO_MAX_SEGS]) 3357 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3358 if (tb[IFLA_GRO_MAX_SIZE]) 3359 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3360 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3361 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3362 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3363 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3364 3365 return dev; 3366 } 3367 EXPORT_SYMBOL(rtnl_create_link); 3368 3369 static int rtnl_group_changelink(const struct sk_buff *skb, 3370 struct net *net, int group, 3371 struct ifinfomsg *ifm, 3372 struct netlink_ext_ack *extack, 3373 struct nlattr **tb) 3374 { 3375 struct net_device *dev, *aux; 3376 int err; 3377 3378 for_each_netdev_safe(net, dev, aux) { 3379 if (dev->group == group) { 3380 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3381 if (err < 0) 3382 return err; 3383 } 3384 } 3385 3386 return 0; 3387 } 3388 3389 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3390 const struct rtnl_link_ops *ops, 3391 const struct nlmsghdr *nlh, 3392 struct nlattr **tb, struct nlattr **data, 3393 struct netlink_ext_ack *extack) 3394 { 3395 unsigned char name_assign_type = NET_NAME_USER; 3396 struct net *net = sock_net(skb->sk); 3397 u32 portid = NETLINK_CB(skb).portid; 3398 struct net *dest_net, *link_net; 3399 struct net_device *dev; 3400 char ifname[IFNAMSIZ]; 3401 int err; 3402 3403 if (!ops->alloc && !ops->setup) 3404 return -EOPNOTSUPP; 3405 3406 if (tb[IFLA_IFNAME]) { 3407 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3408 } else { 3409 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3410 name_assign_type = NET_NAME_ENUM; 3411 } 3412 3413 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3414 if (IS_ERR(dest_net)) 3415 return PTR_ERR(dest_net); 3416 3417 if (tb[IFLA_LINK_NETNSID]) { 3418 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3419 3420 link_net = get_net_ns_by_id(dest_net, id); 3421 if (!link_net) { 3422 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3423 err = -EINVAL; 3424 goto out; 3425 } 3426 err = -EPERM; 3427 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3428 goto out; 3429 } else { 3430 link_net = NULL; 3431 } 3432 3433 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3434 name_assign_type, ops, tb, extack); 3435 if (IS_ERR(dev)) { 3436 err = PTR_ERR(dev); 3437 goto out; 3438 } 3439 3440 dev->ifindex = ifm->ifi_index; 3441 3442 if (ops->newlink) 3443 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3444 else 3445 err = register_netdevice(dev); 3446 if (err < 0) { 3447 free_netdev(dev); 3448 goto out; 3449 } 3450 3451 err = rtnl_configure_link(dev, ifm, portid, nlh); 3452 if (err < 0) 3453 goto out_unregister; 3454 if (link_net) { 3455 err = dev_change_net_namespace(dev, dest_net, ifname); 3456 if (err < 0) 3457 goto out_unregister; 3458 } 3459 if (tb[IFLA_MASTER]) { 3460 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3461 if (err) 3462 goto out_unregister; 3463 } 3464 out: 3465 if (link_net) 3466 put_net(link_net); 3467 put_net(dest_net); 3468 return err; 3469 out_unregister: 3470 if (ops->newlink) { 3471 LIST_HEAD(list_kill); 3472 3473 ops->dellink(dev, &list_kill); 3474 unregister_netdevice_many(&list_kill); 3475 } else { 3476 unregister_netdevice(dev); 3477 } 3478 goto out; 3479 } 3480 3481 struct rtnl_newlink_tbs { 3482 struct nlattr *tb[IFLA_MAX + 1]; 3483 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3484 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3485 }; 3486 3487 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3488 struct rtnl_newlink_tbs *tbs, 3489 struct netlink_ext_ack *extack) 3490 { 3491 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3492 struct nlattr ** const tb = tbs->tb; 3493 const struct rtnl_link_ops *m_ops; 3494 struct net_device *master_dev; 3495 struct net *net = sock_net(skb->sk); 3496 const struct rtnl_link_ops *ops; 3497 struct nlattr **slave_data; 3498 char kind[MODULE_NAME_LEN]; 3499 struct net_device *dev; 3500 struct ifinfomsg *ifm; 3501 struct nlattr **data; 3502 bool link_specified; 3503 int err; 3504 3505 #ifdef CONFIG_MODULES 3506 replay: 3507 #endif 3508 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3509 ifla_policy, extack); 3510 if (err < 0) 3511 return err; 3512 3513 err = rtnl_ensure_unique_netns(tb, extack, false); 3514 if (err < 0) 3515 return err; 3516 3517 ifm = nlmsg_data(nlh); 3518 if (ifm->ifi_index > 0) { 3519 link_specified = true; 3520 dev = __dev_get_by_index(net, ifm->ifi_index); 3521 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3522 link_specified = true; 3523 dev = rtnl_dev_get(net, tb); 3524 } else { 3525 link_specified = false; 3526 dev = NULL; 3527 } 3528 3529 master_dev = NULL; 3530 m_ops = NULL; 3531 if (dev) { 3532 master_dev = netdev_master_upper_dev_get(dev); 3533 if (master_dev) 3534 m_ops = master_dev->rtnl_link_ops; 3535 } 3536 3537 err = validate_linkmsg(dev, tb, extack); 3538 if (err < 0) 3539 return err; 3540 3541 if (tb[IFLA_LINKINFO]) { 3542 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3543 tb[IFLA_LINKINFO], 3544 ifla_info_policy, NULL); 3545 if (err < 0) 3546 return err; 3547 } else 3548 memset(linkinfo, 0, sizeof(linkinfo)); 3549 3550 if (linkinfo[IFLA_INFO_KIND]) { 3551 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3552 ops = rtnl_link_ops_get(kind); 3553 } else { 3554 kind[0] = '\0'; 3555 ops = NULL; 3556 } 3557 3558 data = NULL; 3559 if (ops) { 3560 if (ops->maxtype > RTNL_MAX_TYPE) 3561 return -EINVAL; 3562 3563 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3564 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3565 linkinfo[IFLA_INFO_DATA], 3566 ops->policy, extack); 3567 if (err < 0) 3568 return err; 3569 data = tbs->attr; 3570 } 3571 if (ops->validate) { 3572 err = ops->validate(tb, data, extack); 3573 if (err < 0) 3574 return err; 3575 } 3576 } 3577 3578 slave_data = NULL; 3579 if (m_ops) { 3580 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3581 return -EINVAL; 3582 3583 if 
(m_ops->slave_maxtype && 3584 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3585 err = nla_parse_nested_deprecated(tbs->slave_attr, 3586 m_ops->slave_maxtype, 3587 linkinfo[IFLA_INFO_SLAVE_DATA], 3588 m_ops->slave_policy, 3589 extack); 3590 if (err < 0) 3591 return err; 3592 slave_data = tbs->slave_attr; 3593 } 3594 } 3595 3596 if (dev) { 3597 int status = 0; 3598 3599 if (nlh->nlmsg_flags & NLM_F_EXCL) 3600 return -EEXIST; 3601 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3602 return -EOPNOTSUPP; 3603 3604 if (linkinfo[IFLA_INFO_DATA]) { 3605 if (!ops || ops != dev->rtnl_link_ops || 3606 !ops->changelink) 3607 return -EOPNOTSUPP; 3608 3609 err = ops->changelink(dev, tb, data, extack); 3610 if (err < 0) 3611 return err; 3612 status |= DO_SETLINK_NOTIFY; 3613 } 3614 3615 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3616 if (!m_ops || !m_ops->slave_changelink) 3617 return -EOPNOTSUPP; 3618 3619 err = m_ops->slave_changelink(master_dev, dev, tb, 3620 slave_data, extack); 3621 if (err < 0) 3622 return err; 3623 status |= DO_SETLINK_NOTIFY; 3624 } 3625 3626 return do_setlink(skb, dev, ifm, extack, tb, status); 3627 } 3628 3629 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3630 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3631 * or it's for a group 3632 */ 3633 if (link_specified) 3634 return -ENODEV; 3635 if (tb[IFLA_GROUP]) 3636 return rtnl_group_changelink(skb, net, 3637 nla_get_u32(tb[IFLA_GROUP]), 3638 ifm, extack, tb); 3639 return -ENODEV; 3640 } 3641 3642 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3643 return -EOPNOTSUPP; 3644 3645 if (!ops) { 3646 #ifdef CONFIG_MODULES 3647 if (kind[0]) { 3648 __rtnl_unlock(); 3649 request_module("rtnl-link-%s", kind); 3650 rtnl_lock(); 3651 ops = rtnl_link_ops_get(kind); 3652 if (ops) 3653 goto replay; 3654 } 3655 #endif 3656 NL_SET_ERR_MSG(extack, "Unknown device type"); 3657 return -EOPNOTSUPP; 3658 } 3659 3660 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3661 } 3662 3663 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3664 struct netlink_ext_ack *extack) 3665 { 3666 struct rtnl_newlink_tbs *tbs; 3667 int ret; 3668 3669 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3670 if (!tbs) 3671 return -ENOMEM; 3672 3673 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3674 kfree(tbs); 3675 return ret; 3676 } 3677 3678 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3679 const struct nlmsghdr *nlh, 3680 struct nlattr **tb, 3681 struct netlink_ext_ack *extack) 3682 { 3683 struct ifinfomsg *ifm; 3684 int i, err; 3685 3686 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3687 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3688 return -EINVAL; 3689 } 3690 3691 if (!netlink_strict_get_check(skb)) 3692 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3693 ifla_policy, extack); 3694 3695 ifm = nlmsg_data(nlh); 3696 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3697 ifm->ifi_change) { 3698 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3699 return -EINVAL; 3700 } 3701 3702 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3703 ifla_policy, extack); 3704 if (err) 3705 return err; 3706 3707 for (i = 0; i <= IFLA_MAX; i++) { 3708 if (!tb[i]) 3709 continue; 3710 3711 switch (i) { 3712 case IFLA_IFNAME: 3713 case IFLA_ALT_IFNAME: 3714 case IFLA_EXT_MASK: 3715 case IFLA_TARGET_NETNSID: 3716 break; 3717 default: 3718 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3719 return -EINVAL; 3720 } 3721 } 3722 3723 return 0; 3724 } 3725 3726 static int 
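/* A minimal user-space sketch (added for illustration; error handling
 * omitted, and fd is assumed to be a bound NETLINK_ROUTE socket) of a
 * request that passes the strict validation above:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ifinfomsg ifm;
 *	} req = {
 *		.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 *		.nlh.nlmsg_type  = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.ifm.ifi_family  = AF_UNSPEC,
 *		.ifm.ifi_index   = 1,
 *	};
 *
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *
 * Every other ifinfomsg field must remain zero, and only IFLA_IFNAME,
 * IFLA_ALT_IFNAME, IFLA_EXT_MASK and IFLA_TARGET_NETNSID attributes
 * may follow the header, exactly as rtnl_valid_getlink_req() enforces.
 */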
rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3727 struct netlink_ext_ack *extack) 3728 { 3729 struct net *net = sock_net(skb->sk); 3730 struct net *tgt_net = net; 3731 struct ifinfomsg *ifm; 3732 struct nlattr *tb[IFLA_MAX+1]; 3733 struct net_device *dev = NULL; 3734 struct sk_buff *nskb; 3735 int netnsid = -1; 3736 int err; 3737 u32 ext_filter_mask = 0; 3738 3739 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3740 if (err < 0) 3741 return err; 3742 3743 err = rtnl_ensure_unique_netns(tb, extack, true); 3744 if (err < 0) 3745 return err; 3746 3747 if (tb[IFLA_TARGET_NETNSID]) { 3748 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3749 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3750 if (IS_ERR(tgt_net)) 3751 return PTR_ERR(tgt_net); 3752 } 3753 3754 if (tb[IFLA_EXT_MASK]) 3755 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3756 3757 err = -EINVAL; 3758 ifm = nlmsg_data(nlh); 3759 if (ifm->ifi_index > 0) 3760 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3761 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3762 dev = rtnl_dev_get(tgt_net, tb); 3763 else 3764 goto out; 3765 3766 err = -ENODEV; 3767 if (dev == NULL) 3768 goto out; 3769 3770 err = -ENOBUFS; 3771 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3772 if (nskb == NULL) 3773 goto out; 3774 3775 err = rtnl_fill_ifinfo(nskb, dev, net, 3776 RTM_NEWLINK, NETLINK_CB(skb).portid, 3777 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3778 0, NULL, 0, netnsid, GFP_KERNEL); 3779 if (err < 0) { 3780 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3781 WARN_ON(err == -EMSGSIZE); 3782 kfree_skb(nskb); 3783 } else 3784 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3785 out: 3786 if (netnsid >= 0) 3787 put_net(tgt_net); 3788 3789 return err; 3790 } 3791 3792 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3793 bool *changed, struct netlink_ext_ack *extack) 3794 { 3795 char *alt_ifname; 3796 size_t size; 3797 int err; 3798 3799 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3800 if (err) 3801 return err; 3802 3803 if (cmd == RTM_NEWLINKPROP) { 3804 size = rtnl_prop_list_size(dev); 3805 size += nla_total_size(ALTIFNAMSIZ); 3806 if (size >= U16_MAX) { 3807 NL_SET_ERR_MSG(extack, 3808 "effective property list too long"); 3809 return -EINVAL; 3810 } 3811 } 3812 3813 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3814 if (!alt_ifname) 3815 return -ENOMEM; 3816 3817 if (cmd == RTM_NEWLINKPROP) { 3818 err = netdev_name_node_alt_create(dev, alt_ifname); 3819 if (!err) 3820 alt_ifname = NULL; 3821 } else if (cmd == RTM_DELLINKPROP) { 3822 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3823 } else { 3824 WARN_ON_ONCE(1); 3825 err = -EINVAL; 3826 } 3827 3828 kfree(alt_ifname); 3829 if (!err) 3830 *changed = true; 3831 return err; 3832 } 3833 3834 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3835 struct netlink_ext_ack *extack) 3836 { 3837 struct net *net = sock_net(skb->sk); 3838 struct nlattr *tb[IFLA_MAX + 1]; 3839 struct net_device *dev; 3840 struct ifinfomsg *ifm; 3841 bool changed = false; 3842 struct nlattr *attr; 3843 int err, rem; 3844 3845 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3846 if (err) 3847 return err; 3848 3849 err = rtnl_ensure_unique_netns(tb, extack, true); 3850 if (err) 3851 return err; 3852 3853 ifm = nlmsg_data(nlh); 3854 if (ifm->ifi_index > 0) 3855 dev = __dev_get_by_index(net, ifm->ifi_index); 3856 else if (tb[IFLA_IFNAME] || 
tb[IFLA_ALT_IFNAME]) 3857 dev = rtnl_dev_get(net, tb); 3858 else 3859 return -EINVAL; 3860 3861 if (!dev) 3862 return -ENODEV; 3863 3864 if (!tb[IFLA_PROP_LIST]) 3865 return 0; 3866 3867 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3868 switch (nla_type(attr)) { 3869 case IFLA_ALT_IFNAME: 3870 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3871 if (err) 3872 return err; 3873 break; 3874 } 3875 } 3876 3877 if (changed) 3878 netdev_state_change(dev); 3879 return 0; 3880 } 3881 3882 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3883 struct netlink_ext_ack *extack) 3884 { 3885 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3886 } 3887 3888 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3889 struct netlink_ext_ack *extack) 3890 { 3891 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3892 } 3893 3894 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3895 { 3896 struct net *net = sock_net(skb->sk); 3897 size_t min_ifinfo_dump_size = 0; 3898 struct nlattr *tb[IFLA_MAX+1]; 3899 u32 ext_filter_mask = 0; 3900 struct net_device *dev; 3901 int hdrlen; 3902 3903 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3904 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3905 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3906 3907 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3908 if (tb[IFLA_EXT_MASK]) 3909 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3910 } 3911 3912 if (!ext_filter_mask) 3913 return NLMSG_GOODSIZE; 3914 /* 3915 * traverse the list of net devices and compute the minimum 3916 * buffer size based upon the filter mask. 3917 */ 3918 rcu_read_lock(); 3919 for_each_netdev_rcu(net, dev) { 3920 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3921 if_nlmsg_size(dev, ext_filter_mask)); 3922 } 3923 rcu_read_unlock(); 3924 3925 return nlmsg_total_size(min_ifinfo_dump_size); 3926 } 3927 3928 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3929 { 3930 int idx; 3931 int s_idx = cb->family; 3932 int type = cb->nlh->nlmsg_type - RTM_BASE; 3933 int ret = 0; 3934 3935 if (s_idx == 0) 3936 s_idx = 1; 3937 3938 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3939 struct rtnl_link __rcu **tab; 3940 struct rtnl_link *link; 3941 rtnl_dumpit_func dumpit; 3942 3943 if (idx < s_idx || idx == PF_PACKET) 3944 continue; 3945 3946 if (type < 0 || type >= RTM_NR_MSGTYPES) 3947 continue; 3948 3949 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3950 if (!tab) 3951 continue; 3952 3953 link = rcu_dereference_rtnl(tab[type]); 3954 if (!link) 3955 continue; 3956 3957 dumpit = link->dumpit; 3958 if (!dumpit) 3959 continue; 3960 3961 if (idx > s_idx) { 3962 memset(&cb->args[0], 0, sizeof(cb->args)); 3963 cb->prev_seq = 0; 3964 cb->seq = 0; 3965 } 3966 ret = dumpit(skb, cb); 3967 if (ret) 3968 break; 3969 } 3970 cb->family = idx; 3971 3972 return skb->len ? 
: ret; 3973 } 3974 3975 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 3976 unsigned int change, 3977 u32 event, gfp_t flags, int *new_nsid, 3978 int new_ifindex, u32 portid, u32 seq) 3979 { 3980 struct net *net = dev_net(dev); 3981 struct sk_buff *skb; 3982 int err = -ENOBUFS; 3983 3984 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 3985 if (skb == NULL) 3986 goto errout; 3987 3988 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3989 type, portid, seq, change, 0, 0, event, 3990 new_nsid, new_ifindex, -1, flags); 3991 if (err < 0) { 3992 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3993 WARN_ON(err == -EMSGSIZE); 3994 kfree_skb(skb); 3995 goto errout; 3996 } 3997 return skb; 3998 errout: 3999 if (err < 0) 4000 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4001 return NULL; 4002 } 4003 4004 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4005 u32 portid, const struct nlmsghdr *nlh) 4006 { 4007 struct net *net = dev_net(dev); 4008 4009 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4010 } 4011 4012 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4013 unsigned int change, u32 event, 4014 gfp_t flags, int *new_nsid, int new_ifindex, 4015 u32 portid, const struct nlmsghdr *nlh) 4016 { 4017 struct sk_buff *skb; 4018 4019 if (dev->reg_state != NETREG_REGISTERED) 4020 return; 4021 4022 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4023 new_ifindex, portid, nlmsg_seq(nlh)); 4024 if (skb) 4025 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4026 } 4027 4028 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4029 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4030 { 4031 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4032 NULL, 0, portid, nlh); 4033 } 4034 4035 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4036 gfp_t flags, int *new_nsid, int new_ifindex) 4037 { 4038 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4039 new_nsid, new_ifindex, 0, NULL); 4040 } 4041 4042 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4043 struct net_device *dev, 4044 u8 *addr, u16 vid, u32 pid, u32 seq, 4045 int type, unsigned int flags, 4046 int nlflags, u16 ndm_state) 4047 { 4048 struct nlmsghdr *nlh; 4049 struct ndmsg *ndm; 4050 4051 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4052 if (!nlh) 4053 return -EMSGSIZE; 4054 4055 ndm = nlmsg_data(nlh); 4056 ndm->ndm_family = AF_BRIDGE; 4057 ndm->ndm_pad1 = 0; 4058 ndm->ndm_pad2 = 0; 4059 ndm->ndm_flags = flags; 4060 ndm->ndm_type = 0; 4061 ndm->ndm_ifindex = dev->ifindex; 4062 ndm->ndm_state = ndm_state; 4063 4064 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 4065 goto nla_put_failure; 4066 if (vid) 4067 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4068 goto nla_put_failure; 4069 4070 nlmsg_end(skb, nlh); 4071 return 0; 4072 4073 nla_put_failure: 4074 nlmsg_cancel(skb, nlh); 4075 return -EMSGSIZE; 4076 } 4077 4078 static inline size_t rtnl_fdb_nlmsg_size(void) 4079 { 4080 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4081 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 4082 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4083 0; 4084 } 4085 4086 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4087 u16 ndm_state) 4088 { 4089 struct net *net = dev_net(dev); 4090 struct sk_buff *skb; 4091 int err = -ENOBUFS; 4092 4093 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 4094 if (!skb) 4095 goto errout; 4096 4097 err = 
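/* Note (added): the skb allocated above was sized by
 * rtnl_fdb_nlmsg_size(), which accounts for exactly what the fill
 * below emits:
 *
 *	NLMSG_ALIGN(sizeof(struct ndmsg))	the ndmsg header
 *	+ nla_total_size(ETH_ALEN)		NDA_LLADDR
 *	+ nla_total_size(sizeof(u16))		NDA_VLAN
 *
 * so the call is not expected to fail with -EMSGSIZE here.
 */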
nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4098 0, 0, type, NTF_SELF, 0, ndm_state); 4099 if (err < 0) { 4100 kfree_skb(skb); 4101 goto errout; 4102 } 4103 4104 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4105 return; 4106 errout: 4107 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4108 } 4109 4110 /* 4111 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4112 */ 4113 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4114 struct nlattr *tb[], 4115 struct net_device *dev, 4116 const unsigned char *addr, u16 vid, 4117 u16 flags) 4118 { 4119 int err = -EINVAL; 4120 4121 /* If aging addresses are supported, the device will need to 4122 * implement its own handler for this. 4123 */ 4124 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4125 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4126 return err; 4127 } 4128 4129 if (tb[NDA_FLAGS_EXT]) { 4130 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4131 return err; 4132 } 4133 4134 if (vid) { 4135 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4136 return err; 4137 } 4138 4139 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4140 err = dev_uc_add_excl(dev, addr); 4141 else if (is_multicast_ether_addr(addr)) 4142 err = dev_mc_add_excl(dev, addr); 4143 4144 /* Only return duplicate errors if NLM_F_EXCL is set */ 4145 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4146 err = 0; 4147 4148 return err; 4149 } 4150 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4151 4152 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4153 struct netlink_ext_ack *extack) 4154 { 4155 u16 vid = 0; 4156 4157 if (vlan_attr) { 4158 if (nla_len(vlan_attr) != sizeof(u16)) { 4159 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4160 return -EINVAL; 4161 } 4162 4163 vid = nla_get_u16(vlan_attr); 4164 4165 if (!vid || vid >= VLAN_VID_MASK) { 4166 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4167 return -EINVAL; 4168 } 4169 } 4170 *p_vid = vid; 4171 return 0; 4172 } 4173 4174 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4175 struct netlink_ext_ack *extack) 4176 { 4177 struct net *net = sock_net(skb->sk); 4178 struct ndmsg *ndm; 4179 struct nlattr *tb[NDA_MAX+1]; 4180 struct net_device *dev; 4181 u8 *addr; 4182 u16 vid; 4183 int err; 4184 4185 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4186 extack); 4187 if (err < 0) 4188 return err; 4189 4190 ndm = nlmsg_data(nlh); 4191 if (ndm->ndm_ifindex == 0) { 4192 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4193 return -EINVAL; 4194 } 4195 4196 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4197 if (dev == NULL) { 4198 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4199 return -ENODEV; 4200 } 4201 4202 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4203 NL_SET_ERR_MSG(extack, "invalid address"); 4204 return -EINVAL; 4205 } 4206 4207 if (dev->type != ARPHRD_ETHER) { 4208 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4209 return -EINVAL; 4210 } 4211 4212 addr = nla_data(tb[NDA_LLADDR]); 4213 4214 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4215 if (err) 4216 return err; 4217 4218 err = -EOPNOTSUPP; 4219 4220 /* Support fdb on master device, the net/bridge default case */ 4221 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4222 netif_is_bridge_port(dev)) { 4223 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4224 const struct net_device_ops *ops = br_dev->netdev_ops; 4225 4226 err = ops->ndo_fdb_add(ndm, tb,
dev, addr, vid, 4227 nlh->nlmsg_flags, extack); 4228 if (err) 4229 goto out; 4230 else 4231 ndm->ndm_flags &= ~NTF_MASTER; 4232 } 4233 4234 /* Embedded bridge, macvlan, and any other device support */ 4235 if ((ndm->ndm_flags & NTF_SELF)) { 4236 if (dev->netdev_ops->ndo_fdb_add) 4237 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4238 vid, 4239 nlh->nlmsg_flags, 4240 extack); 4241 else 4242 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4243 nlh->nlmsg_flags); 4244 4245 if (!err) { 4246 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4247 ndm->ndm_state); 4248 ndm->ndm_flags &= ~NTF_SELF; 4249 } 4250 } 4251 out: 4252 return err; 4253 } 4254 4255 /* 4256 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4257 */ 4258 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4259 struct nlattr *tb[], 4260 struct net_device *dev, 4261 const unsigned char *addr, u16 vid) 4262 { 4263 int err = -EINVAL; 4264 4265 /* If aging addresses are supported, the device will need to 4266 * implement its own handler for this. 4267 */ 4268 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4269 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4270 return err; 4271 } 4272 4273 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4274 err = dev_uc_del(dev, addr); 4275 else if (is_multicast_ether_addr(addr)) 4276 err = dev_mc_del(dev, addr); 4277 4278 return err; 4279 } 4280 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4281 4282 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { 4283 [NDA_VLAN] = { .type = NLA_U16 }, 4284 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 4285 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 4286 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 4287 }; 4288 4289 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4290 struct netlink_ext_ack *extack) 4291 { 4292 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4293 struct net *net = sock_net(skb->sk); 4294 const struct net_device_ops *ops; 4295 struct ndmsg *ndm; 4296 struct nlattr *tb[NDA_MAX+1]; 4297 struct net_device *dev; 4298 __u8 *addr = NULL; 4299 int err; 4300 u16 vid; 4301 4302 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4303 return -EPERM; 4304 4305 if (!del_bulk) { 4306 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4307 NULL, extack); 4308 } else { 4309 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, 4310 fdb_del_bulk_policy, extack); 4311 } 4312 if (err < 0) 4313 return err; 4314 4315 ndm = nlmsg_data(nlh); 4316 if (ndm->ndm_ifindex == 0) { 4317 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4318 return -EINVAL; 4319 } 4320 4321 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4322 if (dev == NULL) { 4323 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4324 return -ENODEV; 4325 } 4326 4327 if (!del_bulk) { 4328 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4329 NL_SET_ERR_MSG(extack, "invalid address"); 4330 return -EINVAL; 4331 } 4332 addr = nla_data(tb[NDA_LLADDR]); 4333 } 4334 4335 if (dev->type != ARPHRD_ETHER) { 4336 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4337 return -EINVAL; 4338 } 4339 4340 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4341 if (err) 4342 return err; 4343 4344 err = -EOPNOTSUPP; 4345 4346 /* Support fdb on master device, the net/bridge default case */ 4347 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4348 netif_is_bridge_port(dev)) { 4349 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4350 4351 ops = br_dev->netdev_ops; 4352 if (!del_bulk) { 4353 if
(ops->ndo_fdb_del) 4354 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4355 } else { 4356 if (ops->ndo_fdb_del_bulk) 4357 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4358 extack); 4359 } 4360 4361 if (err) 4362 goto out; 4363 else 4364 ndm->ndm_flags &= ~NTF_MASTER; 4365 } 4366 4367 /* Embedded bridge, macvlan, and any other device support */ 4368 if (ndm->ndm_flags & NTF_SELF) { 4369 ops = dev->netdev_ops; 4370 if (!del_bulk) { 4371 if (ops->ndo_fdb_del) 4372 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4373 else 4374 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4375 } else { 4376 /* in case err was cleared by NTF_MASTER call */ 4377 err = -EOPNOTSUPP; 4378 if (ops->ndo_fdb_del_bulk) 4379 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4380 extack); 4381 } 4382 4383 if (!err) { 4384 if (!del_bulk) 4385 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4386 ndm->ndm_state); 4387 ndm->ndm_flags &= ~NTF_SELF; 4388 } 4389 } 4390 out: 4391 return err; 4392 } 4393 4394 static int nlmsg_populate_fdb(struct sk_buff *skb, 4395 struct netlink_callback *cb, 4396 struct net_device *dev, 4397 int *idx, 4398 struct netdev_hw_addr_list *list) 4399 { 4400 struct netdev_hw_addr *ha; 4401 int err; 4402 u32 portid, seq; 4403 4404 portid = NETLINK_CB(cb->skb).portid; 4405 seq = cb->nlh->nlmsg_seq; 4406 4407 list_for_each_entry(ha, &list->list, list) { 4408 if (*idx < cb->args[2]) 4409 goto skip; 4410 4411 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4412 portid, seq, 4413 RTM_NEWNEIGH, NTF_SELF, 4414 NLM_F_MULTI, NUD_PERMANENT); 4415 if (err < 0) 4416 return err; 4417 skip: 4418 *idx += 1; 4419 } 4420 return 0; 4421 } 4422 4423 /** 4424 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4425 * @skb: socket buffer to store message in 4426 * @cb: netlink callback 4427 * @dev: netdevice 4428 * @filter_dev: ignored 4429 * @idx: the number of FDB table entries dumped is added to *@idx 4430 * 4431 * Default netdevice operation to dump the existing unicast address list. 4432 * Returns 0 on success or a negative errno.
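 *
 * Illustrative sketch only (foo_netdev_ops is a made-up name): a driver
 * whose FDB is simply dev->uc/dev->mc can point its callback at this
 * helper,
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};
 *
 * though rtnl_fdb_dump() below also falls back to this helper whenever a
 * device provides no ndo_fdb_dump of its own.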
4433 */ 4434 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4435 struct netlink_callback *cb, 4436 struct net_device *dev, 4437 struct net_device *filter_dev, 4438 int *idx) 4439 { 4440 int err; 4441 4442 if (dev->type != ARPHRD_ETHER) 4443 return -EINVAL; 4444 4445 netif_addr_lock_bh(dev); 4446 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4447 if (err) 4448 goto out; 4449 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4450 out: 4451 netif_addr_unlock_bh(dev); 4452 return err; 4453 } 4454 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4455 4456 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4457 int *br_idx, int *brport_idx, 4458 struct netlink_ext_ack *extack) 4459 { 4460 struct nlattr *tb[NDA_MAX + 1]; 4461 struct ndmsg *ndm; 4462 int err, i; 4463 4464 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4465 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4466 return -EINVAL; 4467 } 4468 4469 ndm = nlmsg_data(nlh); 4470 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4471 ndm->ndm_flags || ndm->ndm_type) { 4472 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4473 return -EINVAL; 4474 } 4475 4476 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4477 NDA_MAX, NULL, extack); 4478 if (err < 0) 4479 return err; 4480 4481 *brport_idx = ndm->ndm_ifindex; 4482 for (i = 0; i <= NDA_MAX; ++i) { 4483 if (!tb[i]) 4484 continue; 4485 4486 switch (i) { 4487 case NDA_IFINDEX: 4488 if (nla_len(tb[i]) != sizeof(u32)) { 4489 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4490 return -EINVAL; 4491 } 4492 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4493 break; 4494 case NDA_MASTER: 4495 if (nla_len(tb[i]) != sizeof(u32)) { 4496 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4497 return -EINVAL; 4498 } 4499 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4500 break; 4501 default: 4502 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4503 return -EINVAL; 4504 } 4505 } 4506 4507 return 0; 4508 } 4509 4510 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4511 int *br_idx, int *brport_idx, 4512 struct netlink_ext_ack *extack) 4513 { 4514 struct nlattr *tb[IFLA_MAX+1]; 4515 int err; 4516 4517 /* A hack to preserve kernel<->userspace interface. 4518 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4519 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4520 * So, check for ndmsg with an optional u32 attribute (not used here). 4521 * Fortunately these sizes don't conflict with the size of ifinfomsg 4522 * with an optional attribute. 
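 *
 * Concretely, the two legacy shapes accepted below are
 *
 *	nlmsg_len(nlh) == sizeof(struct ndmsg)
 *	nlmsg_len(nlh) == sizeof(struct ndmsg) + nla_attr_size(sizeof(u32))
 *
 * and anything else is parsed as ifinfomsg plus IFLA_* attributes.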
4523 */ 4524 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4525 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4526 nla_attr_size(sizeof(u32)))) { 4527 struct ifinfomsg *ifm; 4528 4529 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4530 tb, IFLA_MAX, ifla_policy, 4531 extack); 4532 if (err < 0) { 4533 return -EINVAL; 4534 } else if (err == 0) { 4535 if (tb[IFLA_MASTER]) 4536 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4537 } 4538 4539 ifm = nlmsg_data(nlh); 4540 *brport_idx = ifm->ifi_index; 4541 } 4542 return 0; 4543 } 4544 4545 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4546 { 4547 struct net_device *dev; 4548 struct net_device *br_dev = NULL; 4549 const struct net_device_ops *ops = NULL; 4550 const struct net_device_ops *cops = NULL; 4551 struct net *net = sock_net(skb->sk); 4552 struct hlist_head *head; 4553 int brport_idx = 0; 4554 int br_idx = 0; 4555 int h, s_h; 4556 int idx = 0, s_idx; 4557 int err = 0; 4558 int fidx = 0; 4559 4560 if (cb->strict_check) 4561 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4562 cb->extack); 4563 else 4564 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4565 cb->extack); 4566 if (err < 0) 4567 return err; 4568 4569 if (br_idx) { 4570 br_dev = __dev_get_by_index(net, br_idx); 4571 if (!br_dev) 4572 return -ENODEV; 4573 4574 ops = br_dev->netdev_ops; 4575 } 4576 4577 s_h = cb->args[0]; 4578 s_idx = cb->args[1]; 4579 4580 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4581 idx = 0; 4582 head = &net->dev_index_head[h]; 4583 hlist_for_each_entry(dev, head, index_hlist) { 4584 4585 if (brport_idx && (dev->ifindex != brport_idx)) 4586 continue; 4587 4588 if (!br_idx) { /* user did not specify a specific bridge */ 4589 if (netif_is_bridge_port(dev)) { 4590 br_dev = netdev_master_upper_dev_get(dev); 4591 cops = br_dev->netdev_ops; 4592 } 4593 } else { 4594 if (dev != br_dev && 4595 !netif_is_bridge_port(dev)) 4596 continue; 4597 4598 if (br_dev != netdev_master_upper_dev_get(dev) && 4599 !netif_is_bridge_master(dev)) 4600 continue; 4601 cops = ops; 4602 } 4603 4604 if (idx < s_idx) 4605 goto cont; 4606 4607 if (netif_is_bridge_port(dev)) { 4608 if (cops && cops->ndo_fdb_dump) { 4609 err = cops->ndo_fdb_dump(skb, cb, 4610 br_dev, dev, 4611 &fidx); 4612 if (err == -EMSGSIZE) 4613 goto out; 4614 } 4615 } 4616 4617 if (dev->netdev_ops->ndo_fdb_dump) 4618 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4619 dev, NULL, 4620 &fidx); 4621 else 4622 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4623 &fidx); 4624 if (err == -EMSGSIZE) 4625 goto out; 4626 4627 cops = NULL; 4628 4629 /* reset fdb offset to 0 for rest of the interfaces */ 4630 cb->args[2] = 0; 4631 fidx = 0; 4632 cont: 4633 idx++; 4634 } 4635 } 4636 4637 out: 4638 cb->args[0] = h; 4639 cb->args[1] = idx; 4640 cb->args[2] = fidx; 4641 4642 return skb->len; 4643 } 4644 4645 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4646 struct nlattr **tb, u8 *ndm_flags, 4647 int *br_idx, int *brport_idx, u8 **addr, 4648 u16 *vid, struct netlink_ext_ack *extack) 4649 { 4650 struct ndmsg *ndm; 4651 int err, i; 4652 4653 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4654 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4655 return -EINVAL; 4656 } 4657 4658 ndm = nlmsg_data(nlh); 4659 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4660 ndm->ndm_type) { 4661 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4662 return -EINVAL; 4663 } 4664 4665 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4666 
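		/* NTF_MASTER and NTF_SELF are the only selectors that
		 * rtnl_fdb_get() below can map to a lookup target (the
		 * bridge master vs. the device itself), so any other flag
		 * is rejected here.
		 */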
NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4667 return -EINVAL; 4668 } 4669 4670 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4671 NDA_MAX, nda_policy, extack); 4672 if (err < 0) 4673 return err; 4674 4675 *ndm_flags = ndm->ndm_flags; 4676 *brport_idx = ndm->ndm_ifindex; 4677 for (i = 0; i <= NDA_MAX; ++i) { 4678 if (!tb[i]) 4679 continue; 4680 4681 switch (i) { 4682 case NDA_MASTER: 4683 *br_idx = nla_get_u32(tb[i]); 4684 break; 4685 case NDA_LLADDR: 4686 if (nla_len(tb[i]) != ETH_ALEN) { 4687 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4688 return -EINVAL; 4689 } 4690 *addr = nla_data(tb[i]); 4691 break; 4692 case NDA_VLAN: 4693 err = fdb_vid_parse(tb[i], vid, extack); 4694 if (err) 4695 return err; 4696 break; 4697 case NDA_VNI: 4698 break; 4699 default: 4700 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4701 return -EINVAL; 4702 } 4703 } 4704 4705 return 0; 4706 } 4707 4708 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4709 struct netlink_ext_ack *extack) 4710 { 4711 struct net_device *dev = NULL, *br_dev = NULL; 4712 const struct net_device_ops *ops = NULL; 4713 struct net *net = sock_net(in_skb->sk); 4714 struct nlattr *tb[NDA_MAX + 1]; 4715 struct sk_buff *skb; 4716 int brport_idx = 0; 4717 u8 ndm_flags = 0; 4718 int br_idx = 0; 4719 u8 *addr = NULL; 4720 u16 vid = 0; 4721 int err; 4722 4723 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4724 &brport_idx, &addr, &vid, extack); 4725 if (err < 0) 4726 return err; 4727 4728 if (!addr) { 4729 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4730 return -EINVAL; 4731 } 4732 4733 if (brport_idx) { 4734 dev = __dev_get_by_index(net, brport_idx); 4735 if (!dev) { 4736 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4737 return -ENODEV; 4738 } 4739 } 4740 4741 if (br_idx) { 4742 if (dev) { 4743 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4744 return -EINVAL; 4745 } 4746 4747 br_dev = __dev_get_by_index(net, br_idx); 4748 if (!br_dev) { 4749 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4750 return -EINVAL; 4751 } 4752 ops = br_dev->netdev_ops; 4753 } 4754 4755 if (dev) { 4756 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4757 if (!netif_is_bridge_port(dev)) { 4758 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4759 return -EINVAL; 4760 } 4761 br_dev = netdev_master_upper_dev_get(dev); 4762 if (!br_dev) { 4763 NL_SET_ERR_MSG(extack, "Master of device not found"); 4764 return -EINVAL; 4765 } 4766 ops = br_dev->netdev_ops; 4767 } else { 4768 if (!(ndm_flags & NTF_SELF)) { 4769 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4770 return -EINVAL; 4771 } 4772 ops = dev->netdev_ops; 4773 } 4774 } 4775 4776 if (!br_dev && !dev) { 4777 NL_SET_ERR_MSG(extack, "No device specified"); 4778 return -ENODEV; 4779 } 4780 4781 if (!ops || !ops->ndo_fdb_get) { 4782 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4783 return -EOPNOTSUPP; 4784 } 4785 4786 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4787 if (!skb) 4788 return -ENOBUFS; 4789 4790 if (br_dev) 4791 dev = br_dev; 4792 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4793 NETLINK_CB(in_skb).portid, 4794 nlh->nlmsg_seq, extack); 4795 if (err) 4796 goto out; 4797 4798 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4799 out: 4800 kfree_skb(skb); 4801 return err; 4802 } 4803 4804 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4805 unsigned int attrnum, unsigned int flag) 
4806 { 4807 if (mask & flag) 4808 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4809 return 0; 4810 } 4811 4812 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4813 struct net_device *dev, u16 mode, 4814 u32 flags, u32 mask, int nlflags, 4815 u32 filter_mask, 4816 int (*vlan_fill)(struct sk_buff *skb, 4817 struct net_device *dev, 4818 u32 filter_mask)) 4819 { 4820 struct nlmsghdr *nlh; 4821 struct ifinfomsg *ifm; 4822 struct nlattr *br_afspec; 4823 struct nlattr *protinfo; 4824 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4825 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4826 int err = 0; 4827 4828 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4829 if (nlh == NULL) 4830 return -EMSGSIZE; 4831 4832 ifm = nlmsg_data(nlh); 4833 ifm->ifi_family = AF_BRIDGE; 4834 ifm->__ifi_pad = 0; 4835 ifm->ifi_type = dev->type; 4836 ifm->ifi_index = dev->ifindex; 4837 ifm->ifi_flags = dev_get_flags(dev); 4838 ifm->ifi_change = 0; 4839 4840 4841 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4842 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4843 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4844 (br_dev && 4845 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4846 (dev->addr_len && 4847 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4848 (dev->ifindex != dev_get_iflink(dev) && 4849 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4850 goto nla_put_failure; 4851 4852 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4853 if (!br_afspec) 4854 goto nla_put_failure; 4855 4856 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4857 nla_nest_cancel(skb, br_afspec); 4858 goto nla_put_failure; 4859 } 4860 4861 if (mode != BRIDGE_MODE_UNDEF) { 4862 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4863 nla_nest_cancel(skb, br_afspec); 4864 goto nla_put_failure; 4865 } 4866 } 4867 if (vlan_fill) { 4868 err = vlan_fill(skb, dev, filter_mask); 4869 if (err) { 4870 nla_nest_cancel(skb, br_afspec); 4871 goto nla_put_failure; 4872 } 4873 } 4874 nla_nest_end(skb, br_afspec); 4875 4876 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4877 if (!protinfo) 4878 goto nla_put_failure; 4879 4880 if (brport_nla_put_flag(skb, flags, mask, 4881 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4882 brport_nla_put_flag(skb, flags, mask, 4883 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4884 brport_nla_put_flag(skb, flags, mask, 4885 IFLA_BRPORT_FAST_LEAVE, 4886 BR_MULTICAST_FAST_LEAVE) || 4887 brport_nla_put_flag(skb, flags, mask, 4888 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4889 brport_nla_put_flag(skb, flags, mask, 4890 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4891 brport_nla_put_flag(skb, flags, mask, 4892 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4893 brport_nla_put_flag(skb, flags, mask, 4894 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4895 brport_nla_put_flag(skb, flags, mask, 4896 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4897 brport_nla_put_flag(skb, flags, mask, 4898 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4899 brport_nla_put_flag(skb, flags, mask, 4900 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4901 nla_nest_cancel(skb, protinfo); 4902 goto nla_put_failure; 4903 } 4904 4905 nla_nest_end(skb, protinfo); 4906 4907 nlmsg_end(skb, nlh); 4908 return 0; 4909 nla_put_failure: 4910 nlmsg_cancel(skb, nlh); 4911 return err ? 
err : -EMSGSIZE; 4912 } 4913 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4914 4915 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4916 bool strict_check, u32 *filter_mask, 4917 struct netlink_ext_ack *extack) 4918 { 4919 struct nlattr *tb[IFLA_MAX+1]; 4920 int err, i; 4921 4922 if (strict_check) { 4923 struct ifinfomsg *ifm; 4924 4925 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4926 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4927 return -EINVAL; 4928 } 4929 4930 ifm = nlmsg_data(nlh); 4931 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4932 ifm->ifi_change || ifm->ifi_index) { 4933 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4934 return -EINVAL; 4935 } 4936 4937 err = nlmsg_parse_deprecated_strict(nlh, 4938 sizeof(struct ifinfomsg), 4939 tb, IFLA_MAX, ifla_policy, 4940 extack); 4941 } else { 4942 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4943 tb, IFLA_MAX, ifla_policy, 4944 extack); 4945 } 4946 if (err < 0) 4947 return err; 4948 4949 /* new attributes should only be added with strict checking */ 4950 for (i = 0; i <= IFLA_MAX; ++i) { 4951 if (!tb[i]) 4952 continue; 4953 4954 switch (i) { 4955 case IFLA_EXT_MASK: 4956 *filter_mask = nla_get_u32(tb[i]); 4957 break; 4958 default: 4959 if (strict_check) { 4960 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4961 return -EINVAL; 4962 } 4963 } 4964 } 4965 4966 return 0; 4967 } 4968 4969 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 4970 { 4971 const struct nlmsghdr *nlh = cb->nlh; 4972 struct net *net = sock_net(skb->sk); 4973 struct net_device *dev; 4974 int idx = 0; 4975 u32 portid = NETLINK_CB(cb->skb).portid; 4976 u32 seq = nlh->nlmsg_seq; 4977 u32 filter_mask = 0; 4978 int err; 4979 4980 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 4981 cb->extack); 4982 if (err < 0 && cb->strict_check) 4983 return err; 4984 4985 rcu_read_lock(); 4986 for_each_netdev_rcu(net, dev) { 4987 const struct net_device_ops *ops = dev->netdev_ops; 4988 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4989 4990 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 4991 if (idx >= cb->args[0]) { 4992 err = br_dev->netdev_ops->ndo_bridge_getlink( 4993 skb, portid, seq, dev, 4994 filter_mask, NLM_F_MULTI); 4995 if (err < 0 && err != -EOPNOTSUPP) { 4996 if (likely(skb->len)) 4997 break; 4998 4999 goto out_err; 5000 } 5001 } 5002 idx++; 5003 } 5004 5005 if (ops->ndo_bridge_getlink) { 5006 if (idx >= cb->args[0]) { 5007 err = ops->ndo_bridge_getlink(skb, portid, 5008 seq, dev, 5009 filter_mask, 5010 NLM_F_MULTI); 5011 if (err < 0 && err != -EOPNOTSUPP) { 5012 if (likely(skb->len)) 5013 break; 5014 5015 goto out_err; 5016 } 5017 } 5018 idx++; 5019 } 5020 } 5021 err = skb->len; 5022 out_err: 5023 rcu_read_unlock(); 5024 cb->args[0] = idx; 5025 5026 return err; 5027 } 5028 5029 static inline size_t bridge_nlmsg_size(void) 5030 { 5031 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5032 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5033 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5034 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5035 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5036 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5037 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5038 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5039 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5040 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5041 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5042 } 5043 5044 static int rtnl_bridge_notify(struct net_device *dev) 5045 { 5046 struct net *net = dev_net(dev); 5047 struct sk_buff *skb; 5048 int err = -EOPNOTSUPP; 5049 5050 if (!dev->netdev_ops->ndo_bridge_getlink) 5051 return 0; 5052 5053 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5054 if (!skb) { 5055 err = -ENOMEM; 5056 goto errout; 5057 } 5058 5059 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5060 if (err < 0) 5061 goto errout; 5062 5063 /* Notification info is only filled for bridge ports, not the bridge 5064 * device itself. Therefore, a zero notification length is valid and 5065 * should not result in an error. 5066 */ 5067 if (!skb->len) 5068 goto errout; 5069 5070 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5071 return 0; 5072 errout: 5073 WARN_ON(err == -EMSGSIZE); 5074 kfree_skb(skb); 5075 if (err) 5076 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5077 return err; 5078 } 5079 5080 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5081 struct netlink_ext_ack *extack) 5082 { 5083 struct net *net = sock_net(skb->sk); 5084 struct ifinfomsg *ifm; 5085 struct net_device *dev; 5086 struct nlattr *br_spec, *attr = NULL; 5087 int rem, err = -EOPNOTSUPP; 5088 u16 flags = 0; 5089 bool have_flags = false; 5090 5091 if (nlmsg_len(nlh) < sizeof(*ifm)) 5092 return -EINVAL; 5093 5094 ifm = nlmsg_data(nlh); 5095 if (ifm->ifi_family != AF_BRIDGE) 5096 return -EPFNOSUPPORT; 5097 5098 dev = __dev_get_by_index(net, ifm->ifi_index); 5099 if (!dev) { 5100 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5101 return -ENODEV; 5102 } 5103 5104 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5105 if (br_spec) { 5106 nla_for_each_nested(attr, br_spec, rem) { 5107 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5108 if (nla_len(attr) < sizeof(flags)) 5109 return -EINVAL; 5110 5111 have_flags = true; 5112 flags = nla_get_u16(attr); 5113 break; 5114 } 5115 } 5116 } 5117 5118 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5119 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5120 5121 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5122 err = -EOPNOTSUPP; 5123 goto out; 5124 } 5125 5126 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5127 extack); 5128 if (err) 5129 goto out; 5130 5131 flags &= ~BRIDGE_FLAGS_MASTER; 5132 } 5133 5134 if ((flags & BRIDGE_FLAGS_SELF)) { 5135 if (!dev->netdev_ops->ndo_bridge_setlink) 5136 err = -EOPNOTSUPP; 5137 else 5138 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5139 flags, 5140 extack); 5141 if (!err) { 5142 flags &= ~BRIDGE_FLAGS_SELF; 5143 5144 /* Generate event to notify upper layer of bridge 5145 * change 5146 */ 5147 err = rtnl_bridge_notify(dev); 5148 } 5149 } 5150 5151 if (have_flags) 5152 memcpy(nla_data(attr), &flags, sizeof(flags)); 5153 out: 5154 return err; 5155 } 5156 5157 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5158 struct netlink_ext_ack *extack) 5159 { 5160 struct net *net = sock_net(skb->sk); 5161 struct ifinfomsg *ifm; 5162 struct net_device *dev; 5163 struct nlattr *br_spec, *attr = NULL; 5164 int rem, err = -EOPNOTSUPP; 5165 u16 flags = 0; 5166 bool have_flags = false; 5167 5168 if (nlmsg_len(nlh) < sizeof(*ifm)) 5169 return -EINVAL; 5170 5171 ifm = nlmsg_data(nlh); 5172 if (ifm->ifi_family != AF_BRIDGE) 5173 return -EPFNOSUPPORT; 5174 5175 dev = __dev_get_by_index(net, ifm->ifi_index); 5176 if (!dev) { 5177 NL_SET_ERR_MSG(extack, "unknown ifindex"); 
5178 return -ENODEV; 5179 } 5180 5181 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5182 if (br_spec) { 5183 nla_for_each_nested(attr, br_spec, rem) { 5184 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5185 if (nla_len(attr) < sizeof(flags)) 5186 return -EINVAL; 5187 5188 have_flags = true; 5189 flags = nla_get_u16(attr); 5190 break; 5191 } 5192 } 5193 } 5194 5195 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5196 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5197 5198 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5199 err = -EOPNOTSUPP; 5200 goto out; 5201 } 5202 5203 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5204 if (err) 5205 goto out; 5206 5207 flags &= ~BRIDGE_FLAGS_MASTER; 5208 } 5209 5210 if ((flags & BRIDGE_FLAGS_SELF)) { 5211 if (!dev->netdev_ops->ndo_bridge_dellink) 5212 err = -EOPNOTSUPP; 5213 else 5214 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5215 flags); 5216 5217 if (!err) { 5218 flags &= ~BRIDGE_FLAGS_SELF; 5219 5220 /* Generate event to notify upper layer of bridge 5221 * change 5222 */ 5223 err = rtnl_bridge_notify(dev); 5224 } 5225 } 5226 5227 if (have_flags) 5228 memcpy(nla_data(attr), &flags, sizeof(flags)); 5229 out: 5230 return err; 5231 } 5232 5233 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5234 { 5235 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5236 (!idxattr || idxattr == attrid); 5237 } 5238 5239 static bool 5240 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5241 { 5242 return dev->netdev_ops && 5243 dev->netdev_ops->ndo_has_offload_stats && 5244 dev->netdev_ops->ndo_get_offload_stats && 5245 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5246 } 5247 5248 static unsigned int 5249 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5250 { 5251 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5252 sizeof(struct rtnl_link_stats64) : 0; 5253 } 5254 5255 static int 5256 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5257 struct sk_buff *skb) 5258 { 5259 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5260 struct nlattr *attr = NULL; 5261 void *attr_data; 5262 int err; 5263 5264 if (!size) 5265 return -ENODATA; 5266 5267 attr = nla_reserve_64bit(skb, attr_id, size, 5268 IFLA_OFFLOAD_XSTATS_UNSPEC); 5269 if (!attr) 5270 return -EMSGSIZE; 5271 5272 attr_data = nla_data(attr); 5273 memset(attr_data, 0, size); 5274 5275 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5276 if (err) 5277 return err; 5278 5279 return 0; 5280 } 5281 5282 static unsigned int 5283 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5284 enum netdev_offload_xstats_type type) 5285 { 5286 bool enabled = netdev_offload_xstats_enabled(dev, type); 5287 5288 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5289 } 5290 5291 struct rtnl_offload_xstats_request_used { 5292 bool request; 5293 bool used; 5294 }; 5295 5296 static int 5297 rtnl_offload_xstats_get_stats(struct net_device *dev, 5298 enum netdev_offload_xstats_type type, 5299 struct rtnl_offload_xstats_request_used *ru, 5300 struct rtnl_hw_stats64 *stats, 5301 struct netlink_ext_ack *extack) 5302 { 5303 bool request; 5304 bool used; 5305 int err; 5306 5307 request = netdev_offload_xstats_enabled(dev, type); 5308 if (!request) { 5309 used = false; 5310 goto out; 5311 } 5312 5313 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5314 if (err) 5315 return err; 5316 5317 out: 5318 if (ru) { 5319 ru->request = request; 5320 ru->used = used; 5321 } 5322 return 0; 5323 } 5324 5325 static int 5326 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5327 struct rtnl_offload_xstats_request_used *ru) 5328 { 5329 struct nlattr *nest; 5330 5331 nest = nla_nest_start(skb, attr_id); 5332 if (!nest) 5333 return -EMSGSIZE; 5334 5335 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5336 goto nla_put_failure; 5337 5338 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5339 goto nla_put_failure; 5340 5341 nla_nest_end(skb, nest); 5342 return 0; 5343 5344 nla_put_failure: 5345 nla_nest_cancel(skb, nest); 5346 return -EMSGSIZE; 5347 } 5348 5349 static int 5350 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5351 struct netlink_ext_ack *extack) 5352 { 5353 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5354 struct rtnl_offload_xstats_request_used ru_l3; 5355 struct nlattr *nest; 5356 int err; 5357 5358 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5359 if (err) 5360 return err; 5361 5362 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5363 if (!nest) 5364 return -EMSGSIZE; 5365 5366 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5367 IFLA_OFFLOAD_XSTATS_L3_STATS, 5368 &ru_l3)) 5369 goto nla_put_failure; 5370 5371 nla_nest_end(skb, nest); 5372 return 0; 5373 5374 nla_put_failure: 5375 nla_nest_cancel(skb, nest); 5376 return -EMSGSIZE; 5377 } 5378 5379 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5380 int *prividx, u32 off_filter_mask, 5381 struct netlink_ext_ack *extack) 5382 { 5383 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5384 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5385 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5386 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5387 bool have_data = false; 5388 int err; 5389 5390 if (*prividx <= attr_id_cpu_hit && 5391 (off_filter_mask & 5392 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5393 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5394 if (!err) { 5395 have_data = true; 5396 } else if (err != -ENODATA) { 5397 *prividx = attr_id_cpu_hit; 5398 return err; 5399 } 5400 } 5401 5402 if (*prividx <= attr_id_hw_s_info && 5403 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5404 *prividx = attr_id_hw_s_info; 5405 5406 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5407 if (err) 5408 return err; 5409 5410 have_data = true; 5411 *prividx = 0; 5412 } 5413 5414 if (*prividx <= attr_id_l3_stats && 5415 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5416 unsigned int size_l3; 5417 struct nlattr *attr; 5418 5419 *prividx = attr_id_l3_stats; 5420 5421 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5422 if (!size_l3) 5423 goto skip_l3_stats; 5424 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5425 IFLA_OFFLOAD_XSTATS_UNSPEC); 5426 if (!attr) 5427 return -EMSGSIZE; 5428 5429 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5430 nla_data(attr), extack); 5431 if (err) 5432 return err; 5433 5434 have_data = true; 5435 skip_l3_stats: 5436 *prividx = 0; 5437 } 5438 5439 if (!have_data) 5440 return -ENODATA; 5441 5442 *prividx = 0; 5443 return 0; 5444 } 5445 5446 static unsigned int 5447 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5448 enum netdev_offload_xstats_type type) 5449 { 5450 bool enabled = netdev_offload_xstats_enabled(dev, type); 5451 5452 return nla_total_size(0) + 5453 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5454 nla_total_size(sizeof(u8)) + 5455 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5456 (enabled ? nla_total_size(sizeof(u8)) : 0) + 5457 0; 5458 } 5459 5460 static unsigned int 5461 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5462 { 5463 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5464 5465 return nla_total_size(0) + 5466 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5467 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5468 0; 5469 } 5470 5471 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5472 u32 off_filter_mask) 5473 { 5474 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5475 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5476 int nla_size = 0; 5477 int size; 5478 5479 if (off_filter_mask & 5480 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5481 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5482 nla_size += nla_total_size_64bit(size); 5483 } 5484 5485 if (off_filter_mask & 5486 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5487 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5488 5489 if (off_filter_mask & 5490 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5491 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5492 nla_size += nla_total_size_64bit(size); 5493 } 5494 5495 if (nla_size != 0) 5496 nla_size += nla_total_size(0); 5497 5498 return nla_size; 5499 } 5500 5501 struct rtnl_stats_dump_filters { 5502 /* mask[0] filters outer attributes. Then individual nests have their 5503 * filtering mask at the index of the nested attribute. 
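 *
 * For example (illustrative), a request for L3 HW stats only would carry
 *
 *	mask[0] = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
 *	mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS);
 *
 * the same shape rtnl_offload_xstats_notify() builds below.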
5504 */ 5505 u32 mask[IFLA_STATS_MAX + 1]; 5506 }; 5507 5508 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5509 int type, u32 pid, u32 seq, u32 change, 5510 unsigned int flags, 5511 const struct rtnl_stats_dump_filters *filters, 5512 int *idxattr, int *prividx, 5513 struct netlink_ext_ack *extack) 5514 { 5515 unsigned int filter_mask = filters->mask[0]; 5516 struct if_stats_msg *ifsm; 5517 struct nlmsghdr *nlh; 5518 struct nlattr *attr; 5519 int s_prividx = *prividx; 5520 int err; 5521 5522 ASSERT_RTNL(); 5523 5524 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5525 if (!nlh) 5526 return -EMSGSIZE; 5527 5528 ifsm = nlmsg_data(nlh); 5529 ifsm->family = PF_UNSPEC; 5530 ifsm->pad1 = 0; 5531 ifsm->pad2 = 0; 5532 ifsm->ifindex = dev->ifindex; 5533 ifsm->filter_mask = filter_mask; 5534 5535 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5536 struct rtnl_link_stats64 *sp; 5537 5538 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5539 sizeof(struct rtnl_link_stats64), 5540 IFLA_STATS_UNSPEC); 5541 if (!attr) { 5542 err = -EMSGSIZE; 5543 goto nla_put_failure; 5544 } 5545 5546 sp = nla_data(attr); 5547 dev_get_stats(dev, sp); 5548 } 5549 5550 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5551 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5552 5553 if (ops && ops->fill_linkxstats) { 5554 *idxattr = IFLA_STATS_LINK_XSTATS; 5555 attr = nla_nest_start_noflag(skb, 5556 IFLA_STATS_LINK_XSTATS); 5557 if (!attr) { 5558 err = -EMSGSIZE; 5559 goto nla_put_failure; 5560 } 5561 5562 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5563 nla_nest_end(skb, attr); 5564 if (err) 5565 goto nla_put_failure; 5566 *idxattr = 0; 5567 } 5568 } 5569 5570 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5571 *idxattr)) { 5572 const struct rtnl_link_ops *ops = NULL; 5573 const struct net_device *master; 5574 5575 master = netdev_master_upper_dev_get(dev); 5576 if (master) 5577 ops = master->rtnl_link_ops; 5578 if (ops && ops->fill_linkxstats) { 5579 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5580 attr = nla_nest_start_noflag(skb, 5581 IFLA_STATS_LINK_XSTATS_SLAVE); 5582 if (!attr) { 5583 err = -EMSGSIZE; 5584 goto nla_put_failure; 5585 } 5586 5587 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5588 nla_nest_end(skb, attr); 5589 if (err) 5590 goto nla_put_failure; 5591 *idxattr = 0; 5592 } 5593 } 5594 5595 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5596 *idxattr)) { 5597 u32 off_filter_mask; 5598 5599 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5600 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5601 attr = nla_nest_start_noflag(skb, 5602 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5603 if (!attr) { 5604 err = -EMSGSIZE; 5605 goto nla_put_failure; 5606 } 5607 5608 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5609 off_filter_mask, extack); 5610 if (err == -ENODATA) 5611 nla_nest_cancel(skb, attr); 5612 else 5613 nla_nest_end(skb, attr); 5614 5615 if (err && err != -ENODATA) 5616 goto nla_put_failure; 5617 *idxattr = 0; 5618 } 5619 5620 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5621 struct rtnl_af_ops *af_ops; 5622 5623 *idxattr = IFLA_STATS_AF_SPEC; 5624 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5625 if (!attr) { 5626 err = -EMSGSIZE; 5627 goto nla_put_failure; 5628 } 5629 5630 rcu_read_lock(); 5631 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5632 if (af_ops->fill_stats_af) { 5633 struct nlattr *af; 5634 5635 af = 
nla_nest_start_noflag(skb, 5636 af_ops->family); 5637 if (!af) { 5638 rcu_read_unlock(); 5639 err = -EMSGSIZE; 5640 goto nla_put_failure; 5641 } 5642 err = af_ops->fill_stats_af(skb, dev); 5643 5644 if (err == -ENODATA) { 5645 nla_nest_cancel(skb, af); 5646 } else if (err < 0) { 5647 rcu_read_unlock(); 5648 goto nla_put_failure; 5649 } 5650 5651 nla_nest_end(skb, af); 5652 } 5653 } 5654 rcu_read_unlock(); 5655 5656 nla_nest_end(skb, attr); 5657 5658 *idxattr = 0; 5659 } 5660 5661 nlmsg_end(skb, nlh); 5662 5663 return 0; 5664 5665 nla_put_failure: 5666 /* not a multi message or no progress mean a real error */ 5667 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5668 nlmsg_cancel(skb, nlh); 5669 else 5670 nlmsg_end(skb, nlh); 5671 5672 return err; 5673 } 5674 5675 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5676 const struct rtnl_stats_dump_filters *filters) 5677 { 5678 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5679 unsigned int filter_mask = filters->mask[0]; 5680 5681 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5682 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5683 5684 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5685 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5686 int attr = IFLA_STATS_LINK_XSTATS; 5687 5688 if (ops && ops->get_linkxstats_size) { 5689 size += nla_total_size(ops->get_linkxstats_size(dev, 5690 attr)); 5691 /* for IFLA_STATS_LINK_XSTATS */ 5692 size += nla_total_size(0); 5693 } 5694 } 5695 5696 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5697 struct net_device *_dev = (struct net_device *)dev; 5698 const struct rtnl_link_ops *ops = NULL; 5699 const struct net_device *master; 5700 5701 /* netdev_master_upper_dev_get can't take const */ 5702 master = netdev_master_upper_dev_get(_dev); 5703 if (master) 5704 ops = master->rtnl_link_ops; 5705 if (ops && ops->get_linkxstats_size) { 5706 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5707 5708 size += nla_total_size(ops->get_linkxstats_size(dev, 5709 attr)); 5710 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5711 size += nla_total_size(0); 5712 } 5713 } 5714 5715 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5716 u32 off_filter_mask; 5717 5718 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5719 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5720 } 5721 5722 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5723 struct rtnl_af_ops *af_ops; 5724 5725 /* for IFLA_STATS_AF_SPEC */ 5726 size += nla_total_size(0); 5727 5728 rcu_read_lock(); 5729 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5730 if (af_ops->get_stats_af_size) { 5731 size += nla_total_size( 5732 af_ops->get_stats_af_size(dev)); 5733 5734 /* for AF_* */ 5735 size += nla_total_size(0); 5736 } 5737 } 5738 rcu_read_unlock(); 5739 } 5740 5741 return size; 5742 } 5743 5744 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5745 5746 static const struct nla_policy 5747 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5748 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5749 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5750 }; 5751 5752 static const struct nla_policy 5753 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5754 [IFLA_STATS_GET_FILTERS] = 5755 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5756 }; 5757 5758 static const struct nla_policy 5759 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5760 
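	/* A u8 boolean: 1 enables, 0 disables collection of HW-offloaded
	 * L3 stats for the device (see rtnl_stats_set()).
	 */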
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5761 }; 5762 5763 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5764 struct rtnl_stats_dump_filters *filters, 5765 struct netlink_ext_ack *extack) 5766 { 5767 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5768 int err; 5769 int at; 5770 5771 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5772 rtnl_stats_get_policy_filters, extack); 5773 if (err < 0) 5774 return err; 5775 5776 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5777 if (tb[at]) { 5778 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5779 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5780 return -EINVAL; 5781 } 5782 filters->mask[at] = nla_get_u32(tb[at]); 5783 } 5784 } 5785 5786 return 0; 5787 } 5788 5789 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5790 u32 filter_mask, 5791 struct rtnl_stats_dump_filters *filters, 5792 struct netlink_ext_ack *extack) 5793 { 5794 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5795 int err; 5796 int i; 5797 5798 filters->mask[0] = filter_mask; 5799 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5800 filters->mask[i] = -1U; 5801 5802 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5803 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5804 if (err < 0) 5805 return err; 5806 5807 if (tb[IFLA_STATS_GET_FILTERS]) { 5808 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5809 filters, extack); 5810 if (err) 5811 return err; 5812 } 5813 5814 return 0; 5815 } 5816 5817 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5818 bool is_dump, struct netlink_ext_ack *extack) 5819 { 5820 struct if_stats_msg *ifsm; 5821 5822 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5823 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5824 return -EINVAL; 5825 } 5826 5827 if (!strict_check) 5828 return 0; 5829 5830 ifsm = nlmsg_data(nlh); 5831 5832 /* only requests using strict checks can pass data to influence 5833 * the dump. The legacy exception is filter_mask. 
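 * All other header fields must be zero so they can be given a meaning by
 * future kernels without confusing old binaries; the pad bytes and, for
 * dumps, the ifindex are therefore rejected below.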
5834 */ 5835 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5836 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5837 return -EINVAL; 5838 } 5839 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5840 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5841 return -EINVAL; 5842 } 5843 5844 return 0; 5845 } 5846 5847 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5848 struct netlink_ext_ack *extack) 5849 { 5850 struct rtnl_stats_dump_filters filters; 5851 struct net *net = sock_net(skb->sk); 5852 struct net_device *dev = NULL; 5853 int idxattr = 0, prividx = 0; 5854 struct if_stats_msg *ifsm; 5855 struct sk_buff *nskb; 5856 int err; 5857 5858 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5859 false, extack); 5860 if (err) 5861 return err; 5862 5863 ifsm = nlmsg_data(nlh); 5864 if (ifsm->ifindex > 0) 5865 dev = __dev_get_by_index(net, ifsm->ifindex); 5866 else 5867 return -EINVAL; 5868 5869 if (!dev) 5870 return -ENODEV; 5871 5872 if (!ifsm->filter_mask) { 5873 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5874 return -EINVAL; 5875 } 5876 5877 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5878 if (err) 5879 return err; 5880 5881 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5882 if (!nskb) 5883 return -ENOBUFS; 5884 5885 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5886 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5887 0, &filters, &idxattr, &prividx, extack); 5888 if (err < 0) { 5889 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5890 WARN_ON(err == -EMSGSIZE); 5891 kfree_skb(nskb); 5892 } else { 5893 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5894 } 5895 5896 return err; 5897 } 5898 5899 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5900 { 5901 struct netlink_ext_ack *extack = cb->extack; 5902 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5903 struct rtnl_stats_dump_filters filters; 5904 struct net *net = sock_net(skb->sk); 5905 unsigned int flags = NLM_F_MULTI; 5906 struct if_stats_msg *ifsm; 5907 struct hlist_head *head; 5908 struct net_device *dev; 5909 int idx = 0; 5910 5911 s_h = cb->args[0]; 5912 s_idx = cb->args[1]; 5913 s_idxattr = cb->args[2]; 5914 s_prividx = cb->args[3]; 5915 5916 cb->seq = net->dev_base_seq; 5917 5918 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5919 if (err) 5920 return err; 5921 5922 ifsm = nlmsg_data(cb->nlh); 5923 if (!ifsm->filter_mask) { 5924 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5925 return -EINVAL; 5926 } 5927 5928 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5929 extack); 5930 if (err) 5931 return err; 5932 5933 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5934 idx = 0; 5935 head = &net->dev_index_head[h]; 5936 hlist_for_each_entry(dev, head, index_hlist) { 5937 if (idx < s_idx) 5938 goto cont; 5939 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5940 NETLINK_CB(cb->skb).portid, 5941 cb->nlh->nlmsg_seq, 0, 5942 flags, &filters, 5943 &s_idxattr, &s_prividx, 5944 extack); 5945 /* If we ran out of room on the first message, 5946 * we're in trouble 5947 */ 5948 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5949 5950 if (err < 0) 5951 goto out; 5952 s_prividx = 0; 5953 s_idxattr = 0; 5954 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5955 cont: 5956 idx++; 5957 } 5958 } 5959 out: 5960 cb->args[3] = s_prividx; 5961 cb->args[2] = s_idxattr; 
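	/* Resume state for the next dump round: args[0] is the hash
	 * bucket, args[1] the device index within it, and args[2]/args[3]
	 * the attribute and nested private index inside a partially
	 * filled message.
	 */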
5962 cb->args[1] = idx; 5963 cb->args[0] = h; 5964 5965 return skb->len; 5966 } 5967 5968 void rtnl_offload_xstats_notify(struct net_device *dev) 5969 { 5970 struct rtnl_stats_dump_filters response_filters = {}; 5971 struct net *net = dev_net(dev); 5972 int idxattr = 0, prividx = 0; 5973 struct sk_buff *skb; 5974 int err = -ENOBUFS; 5975 5976 ASSERT_RTNL(); 5977 5978 response_filters.mask[0] |= 5979 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 5980 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 5981 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5982 5983 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 5984 GFP_KERNEL); 5985 if (!skb) 5986 goto errout; 5987 5988 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 5989 &response_filters, &idxattr, &prividx, NULL); 5990 if (err < 0) { 5991 kfree_skb(skb); 5992 goto errout; 5993 } 5994 5995 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 5996 return; 5997 5998 errout: 5999 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6000 } 6001 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6002 6003 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6004 struct netlink_ext_ack *extack) 6005 { 6006 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6007 struct rtnl_stats_dump_filters response_filters = {}; 6008 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6009 struct net *net = sock_net(skb->sk); 6010 struct net_device *dev = NULL; 6011 struct if_stats_msg *ifsm; 6012 bool notify = false; 6013 int err; 6014 6015 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6016 false, extack); 6017 if (err) 6018 return err; 6019 6020 ifsm = nlmsg_data(nlh); 6021 if (ifsm->family != AF_UNSPEC) { 6022 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6023 return -EINVAL; 6024 } 6025 6026 if (ifsm->ifindex > 0) 6027 dev = __dev_get_by_index(net, ifsm->ifindex); 6028 else 6029 return -EINVAL; 6030 6031 if (!dev) 6032 return -ENODEV; 6033 6034 if (ifsm->filter_mask) { 6035 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6036 return -EINVAL; 6037 } 6038 6039 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6040 ifla_stats_set_policy, extack); 6041 if (err < 0) 6042 return err; 6043 6044 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6045 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6046 6047 if (req) 6048 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6049 else 6050 err = netdev_offload_xstats_disable(dev, t_l3); 6051 6052 if (!err) 6053 notify = true; 6054 else if (err != -EALREADY) 6055 return err; 6056 6057 response_filters.mask[0] |= 6058 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6059 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6060 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6061 } 6062 6063 if (notify) 6064 rtnl_offload_xstats_notify(dev); 6065 6066 return 0; 6067 } 6068 6069 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6070 struct netlink_ext_ack *extack) 6071 { 6072 struct br_port_msg *bpm; 6073 6074 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6075 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6076 return -EINVAL; 6077 } 6078 6079 bpm = nlmsg_data(nlh); 6080 if (bpm->ifindex) { 6081 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request"); 6082 return -EINVAL; 6083 } 6084 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6085 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump 
request"); 6086 return -EINVAL; 6087 } 6088 6089 return 0; 6090 } 6091 6092 struct rtnl_mdb_dump_ctx { 6093 long idx; 6094 }; 6095 6096 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6097 { 6098 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6099 struct net *net = sock_net(skb->sk); 6100 struct net_device *dev; 6101 int idx, s_idx; 6102 int err; 6103 6104 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx); 6105 6106 if (cb->strict_check) { 6107 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6108 if (err) 6109 return err; 6110 } 6111 6112 s_idx = ctx->idx; 6113 idx = 0; 6114 6115 for_each_netdev(net, dev) { 6116 if (idx < s_idx) 6117 goto skip; 6118 if (!dev->netdev_ops->ndo_mdb_dump) 6119 goto skip; 6120 6121 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6122 if (err == -EMSGSIZE) 6123 goto out; 6124 /* Moving on to next device, reset markers and sequence 6125 * counters since they are all maintained per-device. 6126 */ 6127 memset(cb->ctx, 0, sizeof(cb->ctx)); 6128 cb->prev_seq = 0; 6129 cb->seq = 0; 6130 skip: 6131 idx++; 6132 } 6133 6134 out: 6135 ctx->idx = idx; 6136 return skb->len; 6137 } 6138 6139 static int rtnl_validate_mdb_entry(const struct nlattr *attr, 6140 struct netlink_ext_ack *extack) 6141 { 6142 struct br_mdb_entry *entry = nla_data(attr); 6143 6144 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6145 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6146 return -EINVAL; 6147 } 6148 6149 if (entry->ifindex == 0) { 6150 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed"); 6151 return -EINVAL; 6152 } 6153 6154 if (entry->addr.proto == htons(ETH_P_IP)) { 6155 if (!ipv4_is_multicast(entry->addr.u.ip4) && 6156 !ipv4_is_zeronet(entry->addr.u.ip4)) { 6157 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0"); 6158 return -EINVAL; 6159 } 6160 if (ipv4_is_local_multicast(entry->addr.u.ip4)) { 6161 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast"); 6162 return -EINVAL; 6163 } 6164 #if IS_ENABLED(CONFIG_IPV6) 6165 } else if (entry->addr.proto == htons(ETH_P_IPV6)) { 6166 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) { 6167 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes"); 6168 return -EINVAL; 6169 } 6170 #endif 6171 } else if (entry->addr.proto == 0) { 6172 /* L2 mdb */ 6173 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) { 6174 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast"); 6175 return -EINVAL; 6176 } 6177 } else { 6178 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6179 return -EINVAL; 6180 } 6181 6182 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) { 6183 NL_SET_ERR_MSG(extack, "Unknown entry state"); 6184 return -EINVAL; 6185 } 6186 if (entry->vid >= VLAN_VID_MASK) { 6187 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6188 return -EINVAL; 6189 } 6190 6191 return 0; 6192 } 6193 6194 static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = { 6195 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 }, 6196 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6197 rtnl_validate_mdb_entry, 6198 sizeof(struct br_mdb_entry)), 6199 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6200 }; 6201 6202 static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 6203 struct netlink_ext_ack *extack) 6204 { 6205 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1]; 6206 struct net *net = sock_net(skb->sk); 6207 struct br_port_msg *bpm; 6208 struct net_device *dev; 6209 int err; 6210 
static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}

static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}
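/* Receive path.  Every message arriving on a NETLINK_ROUTE socket funnels
 * through rtnetlink_rcv_msg() below: the handler table filled in by
 * rtnl_register() is consulted by protocol family and message type,
 * GET requests carrying NLM_F_DUMP are diverted to netlink_dump_start(),
 * and all non-GET kinds require CAP_NET_ADMIN.  Unless the handler was
 * registered with RTNL_FLAG_DOIT_UNLOCKED, its doit() runs under the
 * RTNL mutex.
 */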
/* Process one rtnetlink message. */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
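/* netlink_rcv_skb() splits each datagram into individual nlmsghdr blocks
 * and calls rtnetlink_rcv_msg() for every one, handling ACKs and error
 * replies.  For illustration only, the disabled fragment below sketches
 * the matching userspace side of one GET+dump transaction; it is ordinary
 * socket code assumed to live in a userspace program, not part of this
 * file.
 */
#if 0
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg gen;
	} req = {
		.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
		.nlh.nlmsg_type   = RTM_GETLINK,
		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP,
		.gen.rtgen_family = AF_UNSPEC,
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... recv() multipart replies until NLMSG_DONE ... */
#endif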
6347 */ 6348 module_put(owner); 6349 } 6350 return err; 6351 } 6352 6353 link = rtnl_get_link(family, type); 6354 if (!link || !link->doit) { 6355 family = PF_UNSPEC; 6356 link = rtnl_get_link(PF_UNSPEC, type); 6357 if (!link || !link->doit) 6358 goto out_unlock; 6359 } 6360 6361 owner = link->owner; 6362 if (!try_module_get(owner)) { 6363 err = -EPROTONOSUPPORT; 6364 goto out_unlock; 6365 } 6366 6367 flags = link->flags; 6368 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && 6369 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { 6370 NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); 6371 module_put(owner); 6372 goto err_unlock; 6373 } 6374 6375 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 6376 doit = link->doit; 6377 rcu_read_unlock(); 6378 if (doit) 6379 err = doit(skb, nlh, extack); 6380 module_put(owner); 6381 return err; 6382 } 6383 rcu_read_unlock(); 6384 6385 rtnl_lock(); 6386 link = rtnl_get_link(family, type); 6387 if (link && link->doit) 6388 err = link->doit(skb, nlh, extack); 6389 rtnl_unlock(); 6390 6391 module_put(owner); 6392 6393 return err; 6394 6395 out_unlock: 6396 rcu_read_unlock(); 6397 return err; 6398 6399 err_unlock: 6400 rcu_read_unlock(); 6401 return -EOPNOTSUPP; 6402 } 6403 6404 static void rtnetlink_rcv(struct sk_buff *skb) 6405 { 6406 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 6407 } 6408 6409 static int rtnetlink_bind(struct net *net, int group) 6410 { 6411 switch (group) { 6412 case RTNLGRP_IPV4_MROUTE_R: 6413 case RTNLGRP_IPV6_MROUTE_R: 6414 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 6415 return -EPERM; 6416 break; 6417 } 6418 return 0; 6419 } 6420 6421 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 6422 { 6423 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6424 6425 switch (event) { 6426 case NETDEV_REBOOT: 6427 case NETDEV_CHANGEMTU: 6428 case NETDEV_CHANGEADDR: 6429 case NETDEV_CHANGENAME: 6430 case NETDEV_FEAT_CHANGE: 6431 case NETDEV_BONDING_FAILOVER: 6432 case NETDEV_POST_TYPE_CHANGE: 6433 case NETDEV_NOTIFY_PEERS: 6434 case NETDEV_CHANGEUPPER: 6435 case NETDEV_RESEND_IGMP: 6436 case NETDEV_CHANGEINFODATA: 6437 case NETDEV_CHANGELOWERSTATE: 6438 case NETDEV_CHANGE_TX_QUEUE_LEN: 6439 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 6440 GFP_KERNEL, NULL, 0, 0, NULL); 6441 break; 6442 default: 6443 break; 6444 } 6445 return NOTIFY_DONE; 6446 } 6447 6448 static struct notifier_block rtnetlink_dev_notifier = { 6449 .notifier_call = rtnetlink_event, 6450 }; 6451 6452 6453 static int __net_init rtnetlink_net_init(struct net *net) 6454 { 6455 struct sock *sk; 6456 struct netlink_kernel_cfg cfg = { 6457 .groups = RTNLGRP_MAX, 6458 .input = rtnetlink_rcv, 6459 .cb_mutex = &rtnl_mutex, 6460 .flags = NL_CFG_F_NONROOT_RECV, 6461 .bind = rtnetlink_bind, 6462 }; 6463 6464 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 6465 if (!sk) 6466 return -ENOMEM; 6467 net->rtnl = sk; 6468 return 0; 6469 } 6470 6471 static void __net_exit rtnetlink_net_exit(struct net *net) 6472 { 6473 netlink_kernel_release(net->rtnl); 6474 net->rtnl = NULL; 6475 } 6476 6477 static struct pernet_operations rtnetlink_net_ops = { 6478 .init = rtnetlink_net_init, 6479 .exit = rtnetlink_net_exit, 6480 }; 6481 6482 void __init rtnetlink_init(void) 6483 { 6484 if (register_pernet_subsys(&rtnetlink_net_ops)) 6485 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 6486 6487 register_netdevice_notifier(&rtnetlink_dev_notifier); 6488 6489 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 6490 rtnl_dump_ifinfo, 0); 
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
}