// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *		// list not empty now
	 *		// because of thread 2
	 *				  rtnl_lock()
	 *				  while (!list_empty(...))
	 *				    rtnl_lock()
	 *				    wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
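	 *
	 * In short: code paths that may queue work on net_todo_list (for
	 * example via unregister_netdevice()) must release the lock with
	 * rtnl_unlock(), which runs netdev_run_todo(), and must not drop
	 * it through __rtnl_unlock() directly.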
130 */ 131 WARN_ON(!list_empty(&net_todo_list)); 132 133 mutex_unlock(&rtnl_mutex); 134 135 while (head) { 136 struct sk_buff *next = head->next; 137 138 kfree_skb(head); 139 cond_resched(); 140 head = next; 141 } 142 } 143 144 void rtnl_unlock(void) 145 { 146 /* This fellow will unlock it for us. */ 147 netdev_run_todo(); 148 } 149 EXPORT_SYMBOL(rtnl_unlock); 150 151 int rtnl_trylock(void) 152 { 153 return mutex_trylock(&rtnl_mutex); 154 } 155 EXPORT_SYMBOL(rtnl_trylock); 156 157 int rtnl_is_locked(void) 158 { 159 return mutex_is_locked(&rtnl_mutex); 160 } 161 EXPORT_SYMBOL(rtnl_is_locked); 162 163 bool refcount_dec_and_rtnl_lock(refcount_t *r) 164 { 165 return refcount_dec_and_mutex_lock(r, &rtnl_mutex); 166 } 167 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock); 168 169 #ifdef CONFIG_PROVE_LOCKING 170 bool lockdep_rtnl_is_held(void) 171 { 172 return lockdep_is_held(&rtnl_mutex); 173 } 174 EXPORT_SYMBOL(lockdep_rtnl_is_held); 175 #endif /* #ifdef CONFIG_PROVE_LOCKING */ 176 177 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; 178 179 static inline int rtm_msgindex(int msgtype) 180 { 181 int msgindex = msgtype - RTM_BASE; 182 183 /* 184 * msgindex < 0 implies someone tried to register a netlink 185 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that 186 * the message type has not been added to linux/rtnetlink.h 187 */ 188 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); 189 190 return msgindex; 191 } 192 193 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) 194 { 195 struct rtnl_link __rcu **tab; 196 197 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers)) 198 protocol = PF_UNSPEC; 199 200 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]); 201 if (!tab) 202 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]); 203 204 return rcu_dereference_rtnl(tab[msgtype]); 205 } 206 207 static int rtnl_register_internal(struct module *owner, 208 int protocol, int msgtype, 209 rtnl_doit_func doit, rtnl_dumpit_func dumpit, 210 unsigned int flags) 211 { 212 struct rtnl_link *link, *old; 213 struct rtnl_link __rcu **tab; 214 int msgindex; 215 int ret = -ENOBUFS; 216 217 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 218 msgindex = rtm_msgindex(msgtype); 219 220 rtnl_lock(); 221 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 222 if (tab == NULL) { 223 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL); 224 if (!tab) 225 goto unlock; 226 227 /* ensures we see the 0 stores */ 228 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab); 229 } 230 231 old = rtnl_dereference(tab[msgindex]); 232 if (old) { 233 link = kmemdup(old, sizeof(*old), GFP_KERNEL); 234 if (!link) 235 goto unlock; 236 } else { 237 link = kzalloc(sizeof(*link), GFP_KERNEL); 238 if (!link) 239 goto unlock; 240 } 241 242 WARN_ON(link->owner && link->owner != owner); 243 link->owner = owner; 244 245 WARN_ON(doit && link->doit && link->doit != doit); 246 if (doit) 247 link->doit = doit; 248 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit); 249 if (dumpit) 250 link->dumpit = dumpit; 251 252 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL && 253 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED)); 254 link->flags |= flags; 255 256 /* publish protocol:msgtype */ 257 rcu_assign_pointer(tab[msgindex], link); 258 ret = 0; 259 if (old) 260 kfree_rcu(old, rcu); 261 unlock: 262 rtnl_unlock(); 263 return ret; 264 } 265 266 /** 267 * rtnl_register_module - Register a rtnetlink message type 268 * 269 * @owner: module registering the hook (THIS_MODULE) 270 * @protocol: Protocol family 
 *	      or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
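 *
 * A minimal usage sketch (the PF_EXAMPLE family and the RTM_NEWFOO /
 * RTM_GETFOO handlers below are hypothetical, not part of this file):
 * a module's init path could pair
 *
 *	rtnl_register_module(THIS_MODULE, PF_EXAMPLE, RTM_NEWFOO,
 *			     example_newfoo_doit, NULL, 0);
 *	rtnl_register_module(THIS_MODULE, PF_EXAMPLE, RTM_GETFOO,
 *			     NULL, example_getfoo_dumpit, 0);
 *
 * (checking the returned error codes) with a single
 * rtnl_unregister_all(PF_EXAMPLE) in its module exit path.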
356 */ 357 void rtnl_unregister_all(int protocol) 358 { 359 struct rtnl_link __rcu **tab; 360 struct rtnl_link *link; 361 int msgindex; 362 363 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); 364 365 rtnl_lock(); 366 tab = rtnl_dereference(rtnl_msg_handlers[protocol]); 367 if (!tab) { 368 rtnl_unlock(); 369 return; 370 } 371 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); 372 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { 373 link = rtnl_dereference(tab[msgindex]); 374 if (!link) 375 continue; 376 377 RCU_INIT_POINTER(tab[msgindex], NULL); 378 kfree_rcu(link, rcu); 379 } 380 rtnl_unlock(); 381 382 synchronize_net(); 383 384 kfree(tab); 385 } 386 EXPORT_SYMBOL_GPL(rtnl_unregister_all); 387 388 static LIST_HEAD(link_ops); 389 390 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) 391 { 392 const struct rtnl_link_ops *ops; 393 394 list_for_each_entry(ops, &link_ops, list) { 395 if (!strcmp(ops->kind, kind)) 396 return ops; 397 } 398 return NULL; 399 } 400 401 /** 402 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink. 403 * @ops: struct rtnl_link_ops * to register 404 * 405 * The caller must hold the rtnl_mutex. This function should be used 406 * by drivers that create devices during module initialization. It 407 * must be called before registering the devices. 408 * 409 * Returns 0 on success or a negative error code. 410 */ 411 int __rtnl_link_register(struct rtnl_link_ops *ops) 412 { 413 if (rtnl_link_ops_get(ops->kind)) 414 return -EEXIST; 415 416 /* The check for alloc/setup is here because if ops 417 * does not have that filled up, it is not possible 418 * to use the ops for creating device. So do not 419 * fill up dellink as well. That disables rtnl_dellink. 420 */ 421 if ((ops->alloc || ops->setup) && !ops->dellink) 422 ops->dellink = unregister_netdevice_queue; 423 424 list_add_tail(&ops->list, &link_ops); 425 return 0; 426 } 427 EXPORT_SYMBOL_GPL(__rtnl_link_register); 428 429 /** 430 * rtnl_link_register - Register rtnl_link_ops with rtnetlink. 431 * @ops: struct rtnl_link_ops * to register 432 * 433 * Returns 0 on success or a negative error code. 434 */ 435 int rtnl_link_register(struct rtnl_link_ops *ops) 436 { 437 int err; 438 439 /* Sanity-check max sizes to avoid stack buffer overflow. */ 440 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE || 441 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)) 442 return -EINVAL; 443 444 rtnl_lock(); 445 err = __rtnl_link_register(ops); 446 rtnl_unlock(); 447 return err; 448 } 449 EXPORT_SYMBOL_GPL(rtnl_link_register); 450 451 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) 452 { 453 struct net_device *dev; 454 LIST_HEAD(list_kill); 455 456 for_each_netdev(net, dev) { 457 if (dev->rtnl_link_ops == ops) 458 ops->dellink(dev, &list_kill); 459 } 460 unregister_netdevice_many(&list_kill); 461 } 462 463 /** 464 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 465 * @ops: struct rtnl_link_ops * to unregister 466 * 467 * The caller must hold the rtnl_mutex and guarantee net_namespace_list 468 * integrity (hold pernet_ops_rwsem for writing to close the race 469 * with setup_net() and cleanup_net()). 470 */ 471 void __rtnl_link_unregister(struct rtnl_link_ops *ops) 472 { 473 struct net *net; 474 475 for_each_net(net) { 476 __rtnl_kill_links(net, ops); 477 } 478 list_del(&ops->list); 479 } 480 EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 481 482 /* Return with the rtnl_lock held when there are no network 483 * devices unregistering in any network namespace. 
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
609 * @ops: struct rtnl_af_ops * to unregister 610 */ 611 void rtnl_af_unregister(struct rtnl_af_ops *ops) 612 { 613 rtnl_lock(); 614 list_del_rcu(&ops->list); 615 rtnl_unlock(); 616 617 synchronize_rcu(); 618 } 619 EXPORT_SYMBOL_GPL(rtnl_af_unregister); 620 621 static size_t rtnl_link_get_af_size(const struct net_device *dev, 622 u32 ext_filter_mask) 623 { 624 struct rtnl_af_ops *af_ops; 625 size_t size; 626 627 /* IFLA_AF_SPEC */ 628 size = nla_total_size(sizeof(struct nlattr)); 629 630 rcu_read_lock(); 631 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 632 if (af_ops->get_link_af_size) { 633 /* AF_* + nested data */ 634 size += nla_total_size(sizeof(struct nlattr)) + 635 af_ops->get_link_af_size(dev, ext_filter_mask); 636 } 637 } 638 rcu_read_unlock(); 639 640 return size; 641 } 642 643 static bool rtnl_have_link_slave_info(const struct net_device *dev) 644 { 645 struct net_device *master_dev; 646 bool ret = false; 647 648 rcu_read_lock(); 649 650 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); 651 if (master_dev && master_dev->rtnl_link_ops) 652 ret = true; 653 rcu_read_unlock(); 654 return ret; 655 } 656 657 static int rtnl_link_slave_info_fill(struct sk_buff *skb, 658 const struct net_device *dev) 659 { 660 struct net_device *master_dev; 661 const struct rtnl_link_ops *ops; 662 struct nlattr *slave_data; 663 int err; 664 665 master_dev = netdev_master_upper_dev_get((struct net_device *) dev); 666 if (!master_dev) 667 return 0; 668 ops = master_dev->rtnl_link_ops; 669 if (!ops) 670 return 0; 671 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0) 672 return -EMSGSIZE; 673 if (ops->fill_slave_info) { 674 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA); 675 if (!slave_data) 676 return -EMSGSIZE; 677 err = ops->fill_slave_info(skb, master_dev, dev); 678 if (err < 0) 679 goto err_cancel_slave_data; 680 nla_nest_end(skb, slave_data); 681 } 682 return 0; 683 684 err_cancel_slave_data: 685 nla_nest_cancel(skb, slave_data); 686 return err; 687 } 688 689 static int rtnl_link_info_fill(struct sk_buff *skb, 690 const struct net_device *dev) 691 { 692 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 693 struct nlattr *data; 694 int err; 695 696 if (!ops) 697 return 0; 698 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0) 699 return -EMSGSIZE; 700 if (ops->fill_xstats) { 701 err = ops->fill_xstats(skb, dev); 702 if (err < 0) 703 return err; 704 } 705 if (ops->fill_info) { 706 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA); 707 if (data == NULL) 708 return -EMSGSIZE; 709 err = ops->fill_info(skb, dev); 710 if (err < 0) 711 goto err_cancel_data; 712 nla_nest_end(skb, data); 713 } 714 return 0; 715 716 err_cancel_data: 717 nla_nest_cancel(skb, data); 718 return err; 719 } 720 721 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) 722 { 723 struct nlattr *linkinfo; 724 int err = -EMSGSIZE; 725 726 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO); 727 if (linkinfo == NULL) 728 goto out; 729 730 err = rtnl_link_info_fill(skb, dev); 731 if (err < 0) 732 goto err_cancel_link; 733 734 err = rtnl_link_slave_info_fill(skb, dev); 735 if (err < 0) 736 goto err_cancel_link; 737 738 nla_nest_end(skb, linkinfo); 739 return 0; 740 741 err_cancel_link: 742 nla_nest_cancel(skb, linkinfo); 743 out: 744 return err; 745 } 746 747 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 748 { 749 struct sock *rtnl = net->rtnl; 750 751 return nlmsg_notify(rtnl, skb, pid, group, echo, 
GFP_KERNEL); 752 } 753 754 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) 755 { 756 struct sock *rtnl = net->rtnl; 757 758 return nlmsg_unicast(rtnl, skb, pid); 759 } 760 EXPORT_SYMBOL(rtnl_unicast); 761 762 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, 763 struct nlmsghdr *nlh, gfp_t flags) 764 { 765 struct sock *rtnl = net->rtnl; 766 767 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags); 768 } 769 EXPORT_SYMBOL(rtnl_notify); 770 771 void rtnl_set_sk_err(struct net *net, u32 group, int error) 772 { 773 struct sock *rtnl = net->rtnl; 774 775 netlink_set_err(rtnl, 0, group, error); 776 } 777 EXPORT_SYMBOL(rtnl_set_sk_err); 778 779 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) 780 { 781 struct nlattr *mx; 782 int i, valid = 0; 783 784 /* nothing is dumped for dst_default_metrics, so just skip the loop */ 785 if (metrics == dst_default_metrics.metrics) 786 return 0; 787 788 mx = nla_nest_start_noflag(skb, RTA_METRICS); 789 if (mx == NULL) 790 return -ENOBUFS; 791 792 for (i = 0; i < RTAX_MAX; i++) { 793 if (metrics[i]) { 794 if (i == RTAX_CC_ALGO - 1) { 795 char tmp[TCP_CA_NAME_MAX], *name; 796 797 name = tcp_ca_get_name_by_key(metrics[i], tmp); 798 if (!name) 799 continue; 800 if (nla_put_string(skb, i + 1, name)) 801 goto nla_put_failure; 802 } else if (i == RTAX_FEATURES - 1) { 803 u32 user_features = metrics[i] & RTAX_FEATURE_MASK; 804 805 if (!user_features) 806 continue; 807 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK); 808 if (nla_put_u32(skb, i + 1, user_features)) 809 goto nla_put_failure; 810 } else { 811 if (nla_put_u32(skb, i + 1, metrics[i])) 812 goto nla_put_failure; 813 } 814 valid++; 815 } 816 } 817 818 if (!valid) { 819 nla_nest_cancel(skb, mx); 820 return 0; 821 } 822 823 return nla_nest_end(skb, mx); 824 825 nla_put_failure: 826 nla_nest_cancel(skb, mx); 827 return -EMSGSIZE; 828 } 829 EXPORT_SYMBOL(rtnetlink_put_metrics); 830 831 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 832 long expires, u32 error) 833 { 834 struct rta_cacheinfo ci = { 835 .rta_error = error, 836 .rta_id = id, 837 }; 838 839 if (dst) { 840 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse); 841 ci.rta_used = dst->__use; 842 ci.rta_clntref = atomic_read(&dst->__refcnt); 843 } 844 if (expires) { 845 unsigned long clock; 846 847 clock = jiffies_to_clock_t(abs(expires)); 848 clock = min_t(unsigned long, clock, INT_MAX); 849 ci.rta_expires = (expires > 0) ? 
clock : -clock; 850 } 851 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); 852 } 853 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 854 855 static void set_operstate(struct net_device *dev, unsigned char transition) 856 { 857 unsigned char operstate = dev->operstate; 858 859 switch (transition) { 860 case IF_OPER_UP: 861 if ((operstate == IF_OPER_DORMANT || 862 operstate == IF_OPER_TESTING || 863 operstate == IF_OPER_UNKNOWN) && 864 !netif_dormant(dev) && !netif_testing(dev)) 865 operstate = IF_OPER_UP; 866 break; 867 868 case IF_OPER_TESTING: 869 if (operstate == IF_OPER_UP || 870 operstate == IF_OPER_UNKNOWN) 871 operstate = IF_OPER_TESTING; 872 break; 873 874 case IF_OPER_DORMANT: 875 if (operstate == IF_OPER_UP || 876 operstate == IF_OPER_UNKNOWN) 877 operstate = IF_OPER_DORMANT; 878 break; 879 } 880 881 if (dev->operstate != operstate) { 882 write_lock(&dev_base_lock); 883 dev->operstate = operstate; 884 write_unlock(&dev_base_lock); 885 netdev_state_change(dev); 886 } 887 } 888 889 static unsigned int rtnl_dev_get_flags(const struct net_device *dev) 890 { 891 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) | 892 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); 893 } 894 895 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, 896 const struct ifinfomsg *ifm) 897 { 898 unsigned int flags = ifm->ifi_flags; 899 900 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ 901 if (ifm->ifi_change) 902 flags = (flags & ifm->ifi_change) | 903 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change); 904 905 return flags; 906 } 907 908 static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 909 const struct rtnl_link_stats64 *b) 910 { 911 a->rx_packets = b->rx_packets; 912 a->tx_packets = b->tx_packets; 913 a->rx_bytes = b->rx_bytes; 914 a->tx_bytes = b->tx_bytes; 915 a->rx_errors = b->rx_errors; 916 a->tx_errors = b->tx_errors; 917 a->rx_dropped = b->rx_dropped; 918 a->tx_dropped = b->tx_dropped; 919 920 a->multicast = b->multicast; 921 a->collisions = b->collisions; 922 923 a->rx_length_errors = b->rx_length_errors; 924 a->rx_over_errors = b->rx_over_errors; 925 a->rx_crc_errors = b->rx_crc_errors; 926 a->rx_frame_errors = b->rx_frame_errors; 927 a->rx_fifo_errors = b->rx_fifo_errors; 928 a->rx_missed_errors = b->rx_missed_errors; 929 930 a->tx_aborted_errors = b->tx_aborted_errors; 931 a->tx_carrier_errors = b->tx_carrier_errors; 932 a->tx_fifo_errors = b->tx_fifo_errors; 933 a->tx_heartbeat_errors = b->tx_heartbeat_errors; 934 a->tx_window_errors = b->tx_window_errors; 935 936 a->rx_compressed = b->rx_compressed; 937 a->tx_compressed = b->tx_compressed; 938 939 a->rx_nohandler = b->rx_nohandler; 940 } 941 942 /* All VF info */ 943 static inline int rtnl_vfinfo_size(const struct net_device *dev, 944 u32 ext_filter_mask) 945 { 946 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { 947 int num_vfs = dev_num_vf(dev->dev.parent); 948 size_t size = nla_total_size(0); 949 size += num_vfs * 950 (nla_total_size(0) + 951 nla_total_size(sizeof(struct ifla_vf_mac)) + 952 nla_total_size(sizeof(struct ifla_vf_broadcast)) + 953 nla_total_size(sizeof(struct ifla_vf_vlan)) + 954 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ 955 nla_total_size(MAX_VLAN_LIST_LEN * 956 sizeof(struct ifla_vf_vlan_info)) + 957 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 958 nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 959 nla_total_size(sizeof(struct ifla_vf_rate)) + 960 nla_total_size(sizeof(struct ifla_vf_link_state)) + 961 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 962 
nla_total_size(0) + /* nest IFLA_VF_STATS */ 963 /* IFLA_VF_STATS_RX_PACKETS */ 964 nla_total_size_64bit(sizeof(__u64)) + 965 /* IFLA_VF_STATS_TX_PACKETS */ 966 nla_total_size_64bit(sizeof(__u64)) + 967 /* IFLA_VF_STATS_RX_BYTES */ 968 nla_total_size_64bit(sizeof(__u64)) + 969 /* IFLA_VF_STATS_TX_BYTES */ 970 nla_total_size_64bit(sizeof(__u64)) + 971 /* IFLA_VF_STATS_BROADCAST */ 972 nla_total_size_64bit(sizeof(__u64)) + 973 /* IFLA_VF_STATS_MULTICAST */ 974 nla_total_size_64bit(sizeof(__u64)) + 975 /* IFLA_VF_STATS_RX_DROPPED */ 976 nla_total_size_64bit(sizeof(__u64)) + 977 /* IFLA_VF_STATS_TX_DROPPED */ 978 nla_total_size_64bit(sizeof(__u64)) + 979 nla_total_size(sizeof(struct ifla_vf_trust))); 980 return size; 981 } else 982 return 0; 983 } 984 985 static size_t rtnl_port_size(const struct net_device *dev, 986 u32 ext_filter_mask) 987 { 988 size_t port_size = nla_total_size(4) /* PORT_VF */ 989 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ 990 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ 991 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ 992 + nla_total_size(1) /* PROT_VDP_REQUEST */ 993 + nla_total_size(2); /* PORT_VDP_RESPONSE */ 994 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr)); 995 size_t vf_port_size = nla_total_size(sizeof(struct nlattr)) 996 + port_size; 997 size_t port_self_size = nla_total_size(sizeof(struct nlattr)) 998 + port_size; 999 1000 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 1001 !(ext_filter_mask & RTEXT_FILTER_VF)) 1002 return 0; 1003 if (dev_num_vf(dev->dev.parent)) 1004 return port_self_size + vf_ports_size + 1005 vf_port_size * dev_num_vf(dev->dev.parent); 1006 else 1007 return port_self_size; 1008 } 1009 1010 static size_t rtnl_xdp_size(void) 1011 { 1012 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 1013 nla_total_size(1) + /* XDP_ATTACHED */ 1014 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */ 1015 nla_total_size(4); /* XDP_<mode>_PROG_ID */ 1016 1017 return xdp_size; 1018 } 1019 1020 static size_t rtnl_prop_list_size(const struct net_device *dev) 1021 { 1022 struct netdev_name_node *name_node; 1023 size_t size; 1024 1025 if (list_empty(&dev->name_node->list)) 1026 return 0; 1027 size = nla_total_size(0); 1028 list_for_each_entry(name_node, &dev->name_node->list, list) 1029 size += nla_total_size(ALTIFNAMSIZ); 1030 return size; 1031 } 1032 1033 static size_t rtnl_proto_down_size(const struct net_device *dev) 1034 { 1035 size_t size = nla_total_size(1); 1036 1037 if (dev->proto_down_reason) 1038 size += nla_total_size(0) + nla_total_size(4); 1039 1040 return size; 1041 } 1042 1043 static noinline size_t if_nlmsg_size(const struct net_device *dev, 1044 u32 ext_filter_mask) 1045 { 1046 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 1047 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 1048 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ 1049 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 1050 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) 1051 + nla_total_size(sizeof(struct rtnl_link_stats)) 1052 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) 1053 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 1054 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ 1055 + nla_total_size(4) /* IFLA_TXQLEN */ 1056 + nla_total_size(4) /* IFLA_WEIGHT */ 1057 + nla_total_size(4) /* IFLA_MTU */ 1058 + nla_total_size(4) /* IFLA_LINK */ 1059 + nla_total_size(4) /* IFLA_MASTER */ 1060 + nla_total_size(1) /* IFLA_CARRIER */ 1061 + nla_total_size(4) /* IFLA_PROMISCUITY */ 1062 + nla_total_size(4) /* 
IFLA_NUM_TX_QUEUES */ 1063 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 1064 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ 1065 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ 1066 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */ 1067 + nla_total_size(1) /* IFLA_OPERSTATE */ 1068 + nla_total_size(1) /* IFLA_LINKMODE */ 1069 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 1070 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 1071 + nla_total_size(4) /* IFLA_GROUP */ 1072 + nla_total_size(ext_filter_mask 1073 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 1074 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 1075 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ 1076 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ 1077 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ 1078 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ 1079 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ 1080 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ 1081 + rtnl_xdp_size() /* IFLA_XDP */ 1082 + nla_total_size(4) /* IFLA_EVENT */ 1083 + nla_total_size(4) /* IFLA_NEW_NETNSID */ 1084 + nla_total_size(4) /* IFLA_NEW_IFINDEX */ 1085 + rtnl_proto_down_size(dev) /* proto down */ 1086 + nla_total_size(4) /* IFLA_TARGET_NETNSID */ 1087 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ 1088 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ 1089 + nla_total_size(4) /* IFLA_MIN_MTU */ 1090 + nla_total_size(4) /* IFLA_MAX_MTU */ 1091 + rtnl_prop_list_size(dev) 1092 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */ 1093 + 0; 1094 } 1095 1096 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) 1097 { 1098 struct nlattr *vf_ports; 1099 struct nlattr *vf_port; 1100 int vf; 1101 int err; 1102 1103 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS); 1104 if (!vf_ports) 1105 return -EMSGSIZE; 1106 1107 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { 1108 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT); 1109 if (!vf_port) 1110 goto nla_put_failure; 1111 if (nla_put_u32(skb, IFLA_PORT_VF, vf)) 1112 goto nla_put_failure; 1113 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 1114 if (err == -EMSGSIZE) 1115 goto nla_put_failure; 1116 if (err) { 1117 nla_nest_cancel(skb, vf_port); 1118 continue; 1119 } 1120 nla_nest_end(skb, vf_port); 1121 } 1122 1123 nla_nest_end(skb, vf_ports); 1124 1125 return 0; 1126 1127 nla_put_failure: 1128 nla_nest_cancel(skb, vf_ports); 1129 return -EMSGSIZE; 1130 } 1131 1132 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) 1133 { 1134 struct nlattr *port_self; 1135 int err; 1136 1137 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF); 1138 if (!port_self) 1139 return -EMSGSIZE; 1140 1141 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); 1142 if (err) { 1143 nla_nest_cancel(skb, port_self); 1144 return (err == -EMSGSIZE) ? 
err : 0; 1145 } 1146 1147 nla_nest_end(skb, port_self); 1148 1149 return 0; 1150 } 1151 1152 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev, 1153 u32 ext_filter_mask) 1154 { 1155 int err; 1156 1157 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent || 1158 !(ext_filter_mask & RTEXT_FILTER_VF)) 1159 return 0; 1160 1161 err = rtnl_port_self_fill(skb, dev); 1162 if (err) 1163 return err; 1164 1165 if (dev_num_vf(dev->dev.parent)) { 1166 err = rtnl_vf_ports_fill(skb, dev); 1167 if (err) 1168 return err; 1169 } 1170 1171 return 0; 1172 } 1173 1174 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) 1175 { 1176 int err; 1177 struct netdev_phys_item_id ppid; 1178 1179 err = dev_get_phys_port_id(dev, &ppid); 1180 if (err) { 1181 if (err == -EOPNOTSUPP) 1182 return 0; 1183 return err; 1184 } 1185 1186 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id)) 1187 return -EMSGSIZE; 1188 1189 return 0; 1190 } 1191 1192 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) 1193 { 1194 char name[IFNAMSIZ]; 1195 int err; 1196 1197 err = dev_get_phys_port_name(dev, name, sizeof(name)); 1198 if (err) { 1199 if (err == -EOPNOTSUPP) 1200 return 0; 1201 return err; 1202 } 1203 1204 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name)) 1205 return -EMSGSIZE; 1206 1207 return 0; 1208 } 1209 1210 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) 1211 { 1212 struct netdev_phys_item_id ppid = { }; 1213 int err; 1214 1215 err = dev_get_port_parent_id(dev, &ppid, false); 1216 if (err) { 1217 if (err == -EOPNOTSUPP) 1218 return 0; 1219 return err; 1220 } 1221 1222 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id)) 1223 return -EMSGSIZE; 1224 1225 return 0; 1226 } 1227 1228 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, 1229 struct net_device *dev) 1230 { 1231 struct rtnl_link_stats64 *sp; 1232 struct nlattr *attr; 1233 1234 attr = nla_reserve_64bit(skb, IFLA_STATS64, 1235 sizeof(struct rtnl_link_stats64), IFLA_PAD); 1236 if (!attr) 1237 return -EMSGSIZE; 1238 1239 sp = nla_data(attr); 1240 dev_get_stats(dev, sp); 1241 1242 attr = nla_reserve(skb, IFLA_STATS, 1243 sizeof(struct rtnl_link_stats)); 1244 if (!attr) 1245 return -EMSGSIZE; 1246 1247 copy_rtnl_link_stats(nla_data(attr), sp); 1248 1249 return 0; 1250 } 1251 1252 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, 1253 struct net_device *dev, 1254 int vfs_num, 1255 struct nlattr *vfinfo) 1256 { 1257 struct ifla_vf_rss_query_en vf_rss_query_en; 1258 struct nlattr *vf, *vfstats, *vfvlanlist; 1259 struct ifla_vf_link_state vf_linkstate; 1260 struct ifla_vf_vlan_info vf_vlan_info; 1261 struct ifla_vf_spoofchk vf_spoofchk; 1262 struct ifla_vf_tx_rate vf_tx_rate; 1263 struct ifla_vf_stats vf_stats; 1264 struct ifla_vf_trust vf_trust; 1265 struct ifla_vf_vlan vf_vlan; 1266 struct ifla_vf_rate vf_rate; 1267 struct ifla_vf_mac vf_mac; 1268 struct ifla_vf_broadcast vf_broadcast; 1269 struct ifla_vf_info ivi; 1270 struct ifla_vf_guid node_guid; 1271 struct ifla_vf_guid port_guid; 1272 1273 memset(&ivi, 0, sizeof(ivi)); 1274 1275 /* Not all SR-IOV capable drivers support the 1276 * spoofcheck and "RSS query enable" query. Preset to 1277 * -1 so the user space tool can detect that the driver 1278 * didn't report anything. 
1279 */ 1280 ivi.spoofchk = -1; 1281 ivi.rss_query_en = -1; 1282 ivi.trusted = -1; 1283 /* The default value for VF link state is "auto" 1284 * IFLA_VF_LINK_STATE_AUTO which equals zero 1285 */ 1286 ivi.linkstate = 0; 1287 /* VLAN Protocol by default is 802.1Q */ 1288 ivi.vlan_proto = htons(ETH_P_8021Q); 1289 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi)) 1290 return 0; 1291 1292 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info)); 1293 memset(&node_guid, 0, sizeof(node_guid)); 1294 memset(&port_guid, 0, sizeof(port_guid)); 1295 1296 vf_mac.vf = 1297 vf_vlan.vf = 1298 vf_vlan_info.vf = 1299 vf_rate.vf = 1300 vf_tx_rate.vf = 1301 vf_spoofchk.vf = 1302 vf_linkstate.vf = 1303 vf_rss_query_en.vf = 1304 vf_trust.vf = 1305 node_guid.vf = 1306 port_guid.vf = ivi.vf; 1307 1308 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1309 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len); 1310 vf_vlan.vlan = ivi.vlan; 1311 vf_vlan.qos = ivi.qos; 1312 vf_vlan_info.vlan = ivi.vlan; 1313 vf_vlan_info.qos = ivi.qos; 1314 vf_vlan_info.vlan_proto = ivi.vlan_proto; 1315 vf_tx_rate.rate = ivi.max_tx_rate; 1316 vf_rate.min_tx_rate = ivi.min_tx_rate; 1317 vf_rate.max_tx_rate = ivi.max_tx_rate; 1318 vf_spoofchk.setting = ivi.spoofchk; 1319 vf_linkstate.link_state = ivi.linkstate; 1320 vf_rss_query_en.setting = ivi.rss_query_en; 1321 vf_trust.setting = ivi.trusted; 1322 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO); 1323 if (!vf) 1324 goto nla_put_vfinfo_failure; 1325 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || 1326 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) || 1327 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || 1328 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate), 1329 &vf_rate) || 1330 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 1331 &vf_tx_rate) || 1332 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 1333 &vf_spoofchk) || 1334 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), 1335 &vf_linkstate) || 1336 nla_put(skb, IFLA_VF_RSS_QUERY_EN, 1337 sizeof(vf_rss_query_en), 1338 &vf_rss_query_en) || 1339 nla_put(skb, IFLA_VF_TRUST, 1340 sizeof(vf_trust), &vf_trust)) 1341 goto nla_put_vf_failure; 1342 1343 if (dev->netdev_ops->ndo_get_vf_guid && 1344 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid, 1345 &port_guid)) { 1346 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid), 1347 &node_guid) || 1348 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid), 1349 &port_guid)) 1350 goto nla_put_vf_failure; 1351 } 1352 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST); 1353 if (!vfvlanlist) 1354 goto nla_put_vf_failure; 1355 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info), 1356 &vf_vlan_info)) { 1357 nla_nest_cancel(skb, vfvlanlist); 1358 goto nla_put_vf_failure; 1359 } 1360 nla_nest_end(skb, vfvlanlist); 1361 memset(&vf_stats, 0, sizeof(vf_stats)); 1362 if (dev->netdev_ops->ndo_get_vf_stats) 1363 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, 1364 &vf_stats); 1365 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS); 1366 if (!vfstats) 1367 goto nla_put_vf_failure; 1368 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, 1369 vf_stats.rx_packets, IFLA_VF_STATS_PAD) || 1370 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, 1371 vf_stats.tx_packets, IFLA_VF_STATS_PAD) || 1372 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, 1373 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || 1374 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, 1375 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || 1376 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, 1377 
vf_stats.broadcast, IFLA_VF_STATS_PAD) || 1378 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, 1379 vf_stats.multicast, IFLA_VF_STATS_PAD) || 1380 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, 1381 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || 1382 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, 1383 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { 1384 nla_nest_cancel(skb, vfstats); 1385 goto nla_put_vf_failure; 1386 } 1387 nla_nest_end(skb, vfstats); 1388 nla_nest_end(skb, vf); 1389 return 0; 1390 1391 nla_put_vf_failure: 1392 nla_nest_cancel(skb, vf); 1393 nla_put_vfinfo_failure: 1394 nla_nest_cancel(skb, vfinfo); 1395 return -EMSGSIZE; 1396 } 1397 1398 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb, 1399 struct net_device *dev, 1400 u32 ext_filter_mask) 1401 { 1402 struct nlattr *vfinfo; 1403 int i, num_vfs; 1404 1405 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0)) 1406 return 0; 1407 1408 num_vfs = dev_num_vf(dev->dev.parent); 1409 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs)) 1410 return -EMSGSIZE; 1411 1412 if (!dev->netdev_ops->ndo_get_vf_config) 1413 return 0; 1414 1415 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST); 1416 if (!vfinfo) 1417 return -EMSGSIZE; 1418 1419 for (i = 0; i < num_vfs; i++) { 1420 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo)) 1421 return -EMSGSIZE; 1422 } 1423 1424 nla_nest_end(skb, vfinfo); 1425 return 0; 1426 } 1427 1428 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) 1429 { 1430 struct rtnl_link_ifmap map; 1431 1432 memset(&map, 0, sizeof(map)); 1433 map.mem_start = dev->mem_start; 1434 map.mem_end = dev->mem_end; 1435 map.base_addr = dev->base_addr; 1436 map.irq = dev->irq; 1437 map.dma = dev->dma; 1438 map.port = dev->if_port; 1439 1440 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) 1441 return -EMSGSIZE; 1442 1443 return 0; 1444 } 1445 1446 static u32 rtnl_xdp_prog_skb(struct net_device *dev) 1447 { 1448 const struct bpf_prog *generic_xdp_prog; 1449 1450 ASSERT_RTNL(); 1451 1452 generic_xdp_prog = rtnl_dereference(dev->xdp_prog); 1453 if (!generic_xdp_prog) 1454 return 0; 1455 return generic_xdp_prog->aux->id; 1456 } 1457 1458 static u32 rtnl_xdp_prog_drv(struct net_device *dev) 1459 { 1460 return dev_xdp_prog_id(dev, XDP_MODE_DRV); 1461 } 1462 1463 static u32 rtnl_xdp_prog_hw(struct net_device *dev) 1464 { 1465 return dev_xdp_prog_id(dev, XDP_MODE_HW); 1466 } 1467 1468 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, 1469 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, 1470 u32 (*get_prog_id)(struct net_device *dev)) 1471 { 1472 u32 curr_id; 1473 int err; 1474 1475 curr_id = get_prog_id(dev); 1476 if (!curr_id) 1477 return 0; 1478 1479 *prog_id = curr_id; 1480 err = nla_put_u32(skb, attr, curr_id); 1481 if (err) 1482 return err; 1483 1484 if (*mode != XDP_ATTACHED_NONE) 1485 *mode = XDP_ATTACHED_MULTI; 1486 else 1487 *mode = tgt_mode; 1488 1489 return 0; 1490 } 1491 1492 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1493 { 1494 struct nlattr *xdp; 1495 u32 prog_id; 1496 int err; 1497 u8 mode; 1498 1499 xdp = nla_nest_start_noflag(skb, IFLA_XDP); 1500 if (!xdp) 1501 return -EMSGSIZE; 1502 1503 prog_id = 0; 1504 mode = XDP_ATTACHED_NONE; 1505 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, 1506 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb); 1507 if (err) 1508 goto err_cancel; 1509 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, 1510 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv); 1511 if (err) 
1512 goto err_cancel; 1513 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, 1514 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw); 1515 if (err) 1516 goto err_cancel; 1517 1518 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); 1519 if (err) 1520 goto err_cancel; 1521 1522 if (prog_id && mode != XDP_ATTACHED_MULTI) { 1523 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); 1524 if (err) 1525 goto err_cancel; 1526 } 1527 1528 nla_nest_end(skb, xdp); 1529 return 0; 1530 1531 err_cancel: 1532 nla_nest_cancel(skb, xdp); 1533 return err; 1534 } 1535 1536 static u32 rtnl_get_event(unsigned long event) 1537 { 1538 u32 rtnl_event_type = IFLA_EVENT_NONE; 1539 1540 switch (event) { 1541 case NETDEV_REBOOT: 1542 rtnl_event_type = IFLA_EVENT_REBOOT; 1543 break; 1544 case NETDEV_FEAT_CHANGE: 1545 rtnl_event_type = IFLA_EVENT_FEATURES; 1546 break; 1547 case NETDEV_BONDING_FAILOVER: 1548 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; 1549 break; 1550 case NETDEV_NOTIFY_PEERS: 1551 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; 1552 break; 1553 case NETDEV_RESEND_IGMP: 1554 rtnl_event_type = IFLA_EVENT_IGMP_RESEND; 1555 break; 1556 case NETDEV_CHANGEINFODATA: 1557 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; 1558 break; 1559 default: 1560 break; 1561 } 1562 1563 return rtnl_event_type; 1564 } 1565 1566 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev) 1567 { 1568 const struct net_device *upper_dev; 1569 int ret = 0; 1570 1571 rcu_read_lock(); 1572 1573 upper_dev = netdev_master_upper_dev_get_rcu(dev); 1574 if (upper_dev) 1575 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex); 1576 1577 rcu_read_unlock(); 1578 return ret; 1579 } 1580 1581 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev, 1582 bool force) 1583 { 1584 int ifindex = dev_get_iflink(dev); 1585 1586 if (force || dev->ifindex != ifindex) 1587 return nla_put_u32(skb, IFLA_LINK, ifindex); 1588 1589 return 0; 1590 } 1591 1592 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, 1593 struct net_device *dev) 1594 { 1595 char buf[IFALIASZ]; 1596 int ret; 1597 1598 ret = dev_get_alias(dev, buf, sizeof(buf)); 1599 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0; 1600 } 1601 1602 static int rtnl_fill_link_netnsid(struct sk_buff *skb, 1603 const struct net_device *dev, 1604 struct net *src_net, gfp_t gfp) 1605 { 1606 bool put_iflink = false; 1607 1608 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { 1609 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1610 1611 if (!net_eq(dev_net(dev), link_net)) { 1612 int id = peernet2id_alloc(src_net, link_net, gfp); 1613 1614 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1615 return -EMSGSIZE; 1616 1617 put_iflink = true; 1618 } 1619 } 1620 1621 return nla_put_iflink(skb, dev, put_iflink); 1622 } 1623 1624 static int rtnl_fill_link_af(struct sk_buff *skb, 1625 const struct net_device *dev, 1626 u32 ext_filter_mask) 1627 { 1628 const struct rtnl_af_ops *af_ops; 1629 struct nlattr *af_spec; 1630 1631 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 1632 if (!af_spec) 1633 return -EMSGSIZE; 1634 1635 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 1636 struct nlattr *af; 1637 int err; 1638 1639 if (!af_ops->fill_link_af) 1640 continue; 1641 1642 af = nla_nest_start_noflag(skb, af_ops->family); 1643 if (!af) 1644 return -EMSGSIZE; 1645 1646 err = af_ops->fill_link_af(skb, dev, ext_filter_mask); 1647 /* 1648 * Caller may return ENODATA to indicate that there 1649 * was no data to be dumped. 
This is not an error, it 1650 * means we should trim the attribute header and 1651 * continue. 1652 */ 1653 if (err == -ENODATA) 1654 nla_nest_cancel(skb, af); 1655 else if (err < 0) 1656 return -EMSGSIZE; 1657 1658 nla_nest_end(skb, af); 1659 } 1660 1661 nla_nest_end(skb, af_spec); 1662 return 0; 1663 } 1664 1665 static int rtnl_fill_alt_ifnames(struct sk_buff *skb, 1666 const struct net_device *dev) 1667 { 1668 struct netdev_name_node *name_node; 1669 int count = 0; 1670 1671 list_for_each_entry(name_node, &dev->name_node->list, list) { 1672 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name)) 1673 return -EMSGSIZE; 1674 count++; 1675 } 1676 return count; 1677 } 1678 1679 static int rtnl_fill_prop_list(struct sk_buff *skb, 1680 const struct net_device *dev) 1681 { 1682 struct nlattr *prop_list; 1683 int ret; 1684 1685 prop_list = nla_nest_start(skb, IFLA_PROP_LIST); 1686 if (!prop_list) 1687 return -EMSGSIZE; 1688 1689 ret = rtnl_fill_alt_ifnames(skb, dev); 1690 if (ret <= 0) 1691 goto nest_cancel; 1692 1693 nla_nest_end(skb, prop_list); 1694 return 0; 1695 1696 nest_cancel: 1697 nla_nest_cancel(skb, prop_list); 1698 return ret; 1699 } 1700 1701 static int rtnl_fill_proto_down(struct sk_buff *skb, 1702 const struct net_device *dev) 1703 { 1704 struct nlattr *pr; 1705 u32 preason; 1706 1707 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1708 goto nla_put_failure; 1709 1710 preason = dev->proto_down_reason; 1711 if (!preason) 1712 return 0; 1713 1714 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON); 1715 if (!pr) 1716 return -EMSGSIZE; 1717 1718 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) { 1719 nla_nest_cancel(skb, pr); 1720 goto nla_put_failure; 1721 } 1722 1723 nla_nest_end(skb, pr); 1724 return 0; 1725 1726 nla_put_failure: 1727 return -EMSGSIZE; 1728 } 1729 1730 static int rtnl_fill_ifinfo(struct sk_buff *skb, 1731 struct net_device *dev, struct net *src_net, 1732 int type, u32 pid, u32 seq, u32 change, 1733 unsigned int flags, u32 ext_filter_mask, 1734 u32 event, int *new_nsid, int new_ifindex, 1735 int tgt_netnsid, gfp_t gfp) 1736 { 1737 struct ifinfomsg *ifm; 1738 struct nlmsghdr *nlh; 1739 struct Qdisc *qdisc; 1740 1741 ASSERT_RTNL(); 1742 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 1743 if (nlh == NULL) 1744 return -EMSGSIZE; 1745 1746 ifm = nlmsg_data(nlh); 1747 ifm->ifi_family = AF_UNSPEC; 1748 ifm->__ifi_pad = 0; 1749 ifm->ifi_type = dev->type; 1750 ifm->ifi_index = dev->ifindex; 1751 ifm->ifi_flags = dev_get_flags(dev); 1752 ifm->ifi_change = change; 1753 1754 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) 1755 goto nla_put_failure; 1756 1757 qdisc = rtnl_dereference(dev->qdisc); 1758 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 1759 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || 1760 nla_put_u8(skb, IFLA_OPERSTATE, 1761 netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN) || 1762 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || 1763 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 1764 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) || 1765 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || 1766 nla_put_u32(skb, IFLA_GROUP, dev->group) || 1767 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || 1768 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || 1769 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || 1770 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || 1771 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) || 1772 #ifdef CONFIG_RPS 1773 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1774 #endif 1775 put_master_ifindex(skb, dev) || 1776 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1777 (qdisc && 1778 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || 1779 nla_put_ifalias(skb, dev) || 1780 nla_put_u32(skb, IFLA_CARRIER_CHANGES, 1781 atomic_read(&dev->carrier_up_count) + 1782 atomic_read(&dev->carrier_down_count)) || 1783 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, 1784 atomic_read(&dev->carrier_up_count)) || 1785 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, 1786 atomic_read(&dev->carrier_down_count))) 1787 goto nla_put_failure; 1788 1789 if (rtnl_fill_proto_down(skb, dev)) 1790 goto nla_put_failure; 1791 1792 if (event != IFLA_EVENT_NONE) { 1793 if (nla_put_u32(skb, IFLA_EVENT, event)) 1794 goto nla_put_failure; 1795 } 1796 1797 if (rtnl_fill_link_ifmap(skb, dev)) 1798 goto nla_put_failure; 1799 1800 if (dev->addr_len) { 1801 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1802 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) 1803 goto nla_put_failure; 1804 } 1805 1806 if (rtnl_phys_port_id_fill(skb, dev)) 1807 goto nla_put_failure; 1808 1809 if (rtnl_phys_port_name_fill(skb, dev)) 1810 goto nla_put_failure; 1811 1812 if (rtnl_phys_switch_id_fill(skb, dev)) 1813 goto nla_put_failure; 1814 1815 if (rtnl_fill_stats(skb, dev)) 1816 goto nla_put_failure; 1817 1818 if (rtnl_fill_vf(skb, dev, ext_filter_mask)) 1819 goto nla_put_failure; 1820 1821 if (rtnl_port_fill(skb, dev, ext_filter_mask)) 1822 goto nla_put_failure; 1823 1824 if (rtnl_xdp_fill(skb, dev)) 1825 goto nla_put_failure; 1826 1827 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) { 1828 if (rtnl_link_fill(skb, dev) < 0) 1829 goto nla_put_failure; 1830 } 1831 1832 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp)) 1833 goto nla_put_failure; 1834 1835 if (new_nsid && 1836 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0) 1837 goto nla_put_failure; 1838 if (new_ifindex && 1839 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0) 1840 goto nla_put_failure; 1841 1842 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) && 1843 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr)) 1844 goto nla_put_failure; 1845 1846 rcu_read_lock(); 1847 if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) 1848 goto nla_put_failure_rcu; 1849 rcu_read_unlock(); 1850 1851 if (rtnl_fill_prop_list(skb, dev)) 1852 goto nla_put_failure; 1853 1854 if (dev->dev.parent && 1855 nla_put_string(skb, IFLA_PARENT_DEV_NAME, 1856 dev_name(dev->dev.parent))) 1857 goto nla_put_failure; 1858 1859 if (dev->dev.parent && dev->dev.parent->bus && 1860 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME, 1861 dev->dev.parent->bus->name)) 1862 goto nla_put_failure; 1863 1864 nlmsg_end(skb, nlh); 1865 return 0; 1866 1867 nla_put_failure_rcu: 1868 rcu_read_unlock(); 1869 nla_put_failure: 1870 nlmsg_cancel(skb, nlh); 1871 return -EMSGSIZE; 
1872 } 1873 1874 static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 1875 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 1876 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1877 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1878 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 1879 [IFLA_MTU] = { .type = NLA_U32 }, 1880 [IFLA_LINK] = { .type = NLA_U32 }, 1881 [IFLA_MASTER] = { .type = NLA_U32 }, 1882 [IFLA_CARRIER] = { .type = NLA_U8 }, 1883 [IFLA_TXQLEN] = { .type = NLA_U32 }, 1884 [IFLA_WEIGHT] = { .type = NLA_U32 }, 1885 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 1886 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1887 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1888 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1889 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1890 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to 1891 * allow 0-length string (needed to remove an alias). 1892 */ 1893 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, 1894 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1895 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1896 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1897 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1898 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1899 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 1900 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 1901 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 1902 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 1903 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, 1904 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1905 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 1906 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 1907 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1908 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1909 [IFLA_XDP] = { .type = NLA_NESTED }, 1910 [IFLA_EVENT] = { .type = NLA_U32 }, 1911 [IFLA_GROUP] = { .type = NLA_U32 }, 1912 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 1913 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 1914 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 1915 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 1916 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 1917 [IFLA_PROP_LIST] = { .type = NLA_NESTED }, 1918 [IFLA_ALT_IFNAME] = { .type = NLA_STRING, 1919 .len = ALTIFNAMSIZ - 1 }, 1920 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, 1921 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, 1922 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 1923 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, 1924 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, 1925 }; 1926 1927 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 1928 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 1929 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 1930 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 1931 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 1932 }; 1933 1934 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1935 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1936 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 1937 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 1938 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 1939 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 1940 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 1941 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 1942 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 1943 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 1944 [IFLA_VF_STATS] = { .type = 
NLA_NESTED }, 1945 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 1946 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1947 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1948 }; 1949 1950 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1951 [IFLA_PORT_VF] = { .type = NLA_U32 }, 1952 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 1953 .len = PORT_PROFILE_MAX }, 1954 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 1955 .len = PORT_UUID_MAX }, 1956 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 1957 .len = PORT_UUID_MAX }, 1958 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 1959 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 1960 1961 /* Unused, but we need to keep it here since user space could 1962 * fill it. It's also broken with regard to NLA_BINARY use in 1963 * combination with structs. 1964 */ 1965 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 1966 .len = sizeof(struct ifla_port_vsi) }, 1967 }; 1968 1969 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 1970 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 1971 [IFLA_XDP_FD] = { .type = NLA_S32 }, 1972 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 1973 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 1974 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 1975 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 1976 }; 1977 1978 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 1979 { 1980 const struct rtnl_link_ops *ops = NULL; 1981 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 1982 1983 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 1984 return NULL; 1985 1986 if (linfo[IFLA_INFO_KIND]) { 1987 char kind[MODULE_NAME_LEN]; 1988 1989 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 1990 ops = rtnl_link_ops_get(kind); 1991 } 1992 1993 return ops; 1994 } 1995 1996 static bool link_master_filtered(struct net_device *dev, int master_idx) 1997 { 1998 struct net_device *master; 1999 2000 if (!master_idx) 2001 return false; 2002 2003 master = netdev_master_upper_dev_get(dev); 2004 2005 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2006 * another invalid value for ifindex to denote "no master". 2007 */ 2008 if (master_idx == -1) 2009 return !!master; 2010 2011 if (!master || master->ifindex != master_idx) 2012 return true; 2013 2014 return false; 2015 } 2016 2017 static bool link_kind_filtered(const struct net_device *dev, 2018 const struct rtnl_link_ops *kind_ops) 2019 { 2020 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2021 return true; 2022 2023 return false; 2024 } 2025 2026 static bool link_dump_filtered(struct net_device *dev, 2027 int master_idx, 2028 const struct rtnl_link_ops *kind_ops) 2029 { 2030 if (link_master_filtered(dev, master_idx) || 2031 link_kind_filtered(dev, kind_ops)) 2032 return true; 2033 2034 return false; 2035 } 2036 2037 /** 2038 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2039 * @sk: netlink socket 2040 * @netnsid: network namespace identifier 2041 * 2042 * Returns the network namespace identified by netnsid on success or an error 2043 * pointer on failure. 2044 */ 2045 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2046 { 2047 struct net *net; 2048 2049 net = get_net_ns_by_id(sock_net(sk), netnsid); 2050 if (!net) 2051 return ERR_PTR(-EINVAL); 2052 2053 /* For now, the caller is required to have CAP_NET_ADMIN in 2054 * the user namespace owning the target net ns. 
2055 */ 2056 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2057 put_net(net); 2058 return ERR_PTR(-EACCES); 2059 } 2060 return net; 2061 } 2062 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2063 2064 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2065 bool strict_check, struct nlattr **tb, 2066 struct netlink_ext_ack *extack) 2067 { 2068 int hdrlen; 2069 2070 if (strict_check) { 2071 struct ifinfomsg *ifm; 2072 2073 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2074 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2075 return -EINVAL; 2076 } 2077 2078 ifm = nlmsg_data(nlh); 2079 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2080 ifm->ifi_change) { 2081 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2082 return -EINVAL; 2083 } 2084 if (ifm->ifi_index) { 2085 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2086 return -EINVAL; 2087 } 2088 2089 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2090 IFLA_MAX, ifla_policy, 2091 extack); 2092 } 2093 2094 /* A hack to preserve kernel<->userspace interface. 2095 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2096 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2097 * what iproute2 < v3.9.0 used. 2098 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2099 * attribute, its netlink message is shorter than struct ifinfomsg. 2100 */ 2101 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 2102 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2103 2104 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2105 extack); 2106 } 2107 2108 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2109 { 2110 struct netlink_ext_ack *extack = cb->extack; 2111 const struct nlmsghdr *nlh = cb->nlh; 2112 struct net *net = sock_net(skb->sk); 2113 struct net *tgt_net = net; 2114 int h, s_h; 2115 int idx = 0, s_idx; 2116 struct net_device *dev; 2117 struct hlist_head *head; 2118 struct nlattr *tb[IFLA_MAX+1]; 2119 u32 ext_filter_mask = 0; 2120 const struct rtnl_link_ops *kind_ops = NULL; 2121 unsigned int flags = NLM_F_MULTI; 2122 int master_idx = 0; 2123 int netnsid = -1; 2124 int err, i; 2125 2126 s_h = cb->args[0]; 2127 s_idx = cb->args[1]; 2128 2129 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2130 if (err < 0) { 2131 if (cb->strict_check) 2132 return err; 2133 2134 goto walk_entries; 2135 } 2136 2137 for (i = 0; i <= IFLA_MAX; ++i) { 2138 if (!tb[i]) 2139 continue; 2140 2141 /* new attributes should only be added with strict checking */ 2142 switch (i) { 2143 case IFLA_TARGET_NETNSID: 2144 netnsid = nla_get_s32(tb[i]); 2145 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2146 if (IS_ERR(tgt_net)) { 2147 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2148 return PTR_ERR(tgt_net); 2149 } 2150 break; 2151 case IFLA_EXT_MASK: 2152 ext_filter_mask = nla_get_u32(tb[i]); 2153 break; 2154 case IFLA_MASTER: 2155 master_idx = nla_get_u32(tb[i]); 2156 break; 2157 case IFLA_LINKINFO: 2158 kind_ops = linkinfo_to_kind_ops(tb[i]); 2159 break; 2160 default: 2161 if (cb->strict_check) { 2162 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2163 return -EINVAL; 2164 } 2165 } 2166 } 2167 2168 if (master_idx || kind_ops) 2169 flags |= NLM_F_DUMP_FILTERED; 2170 2171 walk_entries: 2172 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2173 idx = 0; 2174 head = &tgt_net->dev_index_head[h]; 
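/* Walk one hash bucket at a time; cb->args[0] (bucket) and cb->args[1]
 * (offset within it) are written back before returning, so an interrupted
 * dump resumes where it left off.
 */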
2175 hlist_for_each_entry(dev, head, index_hlist) { 2176 if (link_dump_filtered(dev, master_idx, kind_ops)) 2177 goto cont; 2178 if (idx < s_idx) 2179 goto cont; 2180 err = rtnl_fill_ifinfo(skb, dev, net, 2181 RTM_NEWLINK, 2182 NETLINK_CB(cb->skb).portid, 2183 nlh->nlmsg_seq, 0, flags, 2184 ext_filter_mask, 0, NULL, 0, 2185 netnsid, GFP_KERNEL); 2186 2187 if (err < 0) { 2188 if (likely(skb->len)) 2189 goto out; 2190 2191 goto out_err; 2192 } 2193 cont: 2194 idx++; 2195 } 2196 } 2197 out: 2198 err = skb->len; 2199 out_err: 2200 cb->args[1] = idx; 2201 cb->args[0] = h; 2202 cb->seq = tgt_net->dev_base_seq; 2203 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2204 if (netnsid >= 0) 2205 put_net(tgt_net); 2206 2207 return err; 2208 } 2209 2210 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2211 struct netlink_ext_ack *exterr) 2212 { 2213 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2214 exterr); 2215 } 2216 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2217 2218 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2219 { 2220 struct net *net; 2221 /* Examine the link attributes and figure out which 2222 * network namespace we are talking about. 2223 */ 2224 if (tb[IFLA_NET_NS_PID]) 2225 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2226 else if (tb[IFLA_NET_NS_FD]) 2227 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2228 else 2229 net = get_net(src_net); 2230 return net; 2231 } 2232 EXPORT_SYMBOL(rtnl_link_get_net); 2233 2234 /* Figure out which network namespace we are talking about by 2235 * examining the link attributes in the following order: 2236 * 2237 * 1. IFLA_NET_NS_PID 2238 * 2. IFLA_NET_NS_FD 2239 * 3. IFLA_TARGET_NETNSID 2240 */ 2241 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2242 struct nlattr *tb[]) 2243 { 2244 struct net *net; 2245 2246 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2247 return rtnl_link_get_net(src_net, tb); 2248 2249 if (!tb[IFLA_TARGET_NETNSID]) 2250 return get_net(src_net); 2251 2252 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2253 if (!net) 2254 return ERR_PTR(-EINVAL); 2255 2256 return net; 2257 } 2258 2259 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2260 struct net *src_net, 2261 struct nlattr *tb[], int cap) 2262 { 2263 struct net *net; 2264 2265 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2266 if (IS_ERR(net)) 2267 return net; 2268 2269 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2270 put_net(net); 2271 return ERR_PTR(-EPERM); 2272 } 2273 2274 return net; 2275 } 2276 2277 /* Verify that rtnetlink requests do not pass additional properties 2278 * potentially referring to different network namespaces. 
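 * At most one of IFLA_NET_NS_PID, IFLA_NET_NS_FD and IFLA_TARGET_NETNSID
 * may be supplied in a single request; combinations are rejected with
 * -EINVAL below.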
2279 */ 2280 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2281 struct netlink_ext_ack *extack, 2282 bool netns_id_only) 2283 { 2284 2285 if (netns_id_only) { 2286 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2287 return 0; 2288 2289 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2290 return -EOPNOTSUPP; 2291 } 2292 2293 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2294 goto invalid_attr; 2295 2296 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2297 goto invalid_attr; 2298 2299 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2300 goto invalid_attr; 2301 2302 return 0; 2303 2304 invalid_attr: 2305 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2306 return -EINVAL; 2307 } 2308 2309 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2310 struct netlink_ext_ack *extack) 2311 { 2312 if (dev) { 2313 if (tb[IFLA_ADDRESS] && 2314 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2315 return -EINVAL; 2316 2317 if (tb[IFLA_BROADCAST] && 2318 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2319 return -EINVAL; 2320 } 2321 2322 if (tb[IFLA_AF_SPEC]) { 2323 struct nlattr *af; 2324 int rem, err; 2325 2326 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2327 const struct rtnl_af_ops *af_ops; 2328 2329 af_ops = rtnl_af_lookup(nla_type(af)); 2330 if (!af_ops) 2331 return -EAFNOSUPPORT; 2332 2333 if (!af_ops->set_link_af) 2334 return -EOPNOTSUPP; 2335 2336 if (af_ops->validate_link_af) { 2337 err = af_ops->validate_link_af(dev, af, extack); 2338 if (err < 0) 2339 return err; 2340 } 2341 } 2342 } 2343 2344 if (tb[IFLA_GRO_MAX_SIZE]) { 2345 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2346 2347 if (gro_max_size > GRO_MAX_SIZE) { 2348 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2349 return -EINVAL; 2350 } 2351 } 2352 return 0; 2353 } 2354 2355 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2356 int guid_type) 2357 { 2358 const struct net_device_ops *ops = dev->netdev_ops; 2359 2360 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2361 } 2362 2363 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2364 { 2365 if (dev->type != ARPHRD_INFINIBAND) 2366 return -EOPNOTSUPP; 2367 2368 return handle_infiniband_guid(dev, ivt, guid_type); 2369 } 2370 2371 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2372 { 2373 const struct net_device_ops *ops = dev->netdev_ops; 2374 int err = -EINVAL; 2375 2376 if (tb[IFLA_VF_MAC]) { 2377 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2378 2379 if (ivm->vf >= INT_MAX) 2380 return -EINVAL; 2381 err = -EOPNOTSUPP; 2382 if (ops->ndo_set_vf_mac) 2383 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2384 ivm->mac); 2385 if (err < 0) 2386 return err; 2387 } 2388 2389 if (tb[IFLA_VF_VLAN]) { 2390 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2391 2392 if (ivv->vf >= INT_MAX) 2393 return -EINVAL; 2394 err = -EOPNOTSUPP; 2395 if (ops->ndo_set_vf_vlan) 2396 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2397 ivv->qos, 2398 htons(ETH_P_8021Q)); 2399 if (err < 0) 2400 return err; 2401 } 2402 2403 if (tb[IFLA_VF_VLAN_LIST]) { 2404 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2405 struct nlattr *attr; 2406 int rem, len = 0; 2407 2408 err = -EOPNOTSUPP; 2409 if (!ops->ndo_set_vf_vlan) 2410 return err; 2411 2412 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2413 if (nla_type(attr) != 
IFLA_VF_VLAN_INFO || 2414 nla_len(attr) < NLA_HDRLEN) { 2415 return -EINVAL; 2416 } 2417 if (len >= MAX_VLAN_LIST_LEN) 2418 return -EOPNOTSUPP; 2419 ivvl[len] = nla_data(attr); 2420 2421 len++; 2422 } 2423 if (len == 0) 2424 return -EINVAL; 2425 2426 if (ivvl[0]->vf >= INT_MAX) 2427 return -EINVAL; 2428 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2429 ivvl[0]->qos, ivvl[0]->vlan_proto); 2430 if (err < 0) 2431 return err; 2432 } 2433 2434 if (tb[IFLA_VF_TX_RATE]) { 2435 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2436 struct ifla_vf_info ivf; 2437 2438 if (ivt->vf >= INT_MAX) 2439 return -EINVAL; 2440 err = -EOPNOTSUPP; 2441 if (ops->ndo_get_vf_config) 2442 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2443 if (err < 0) 2444 return err; 2445 2446 err = -EOPNOTSUPP; 2447 if (ops->ndo_set_vf_rate) 2448 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2449 ivf.min_tx_rate, 2450 ivt->rate); 2451 if (err < 0) 2452 return err; 2453 } 2454 2455 if (tb[IFLA_VF_RATE]) { 2456 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2457 2458 if (ivt->vf >= INT_MAX) 2459 return -EINVAL; 2460 err = -EOPNOTSUPP; 2461 if (ops->ndo_set_vf_rate) 2462 err = ops->ndo_set_vf_rate(dev, ivt->vf, 2463 ivt->min_tx_rate, 2464 ivt->max_tx_rate); 2465 if (err < 0) 2466 return err; 2467 } 2468 2469 if (tb[IFLA_VF_SPOOFCHK]) { 2470 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2471 2472 if (ivs->vf >= INT_MAX) 2473 return -EINVAL; 2474 err = -EOPNOTSUPP; 2475 if (ops->ndo_set_vf_spoofchk) 2476 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2477 ivs->setting); 2478 if (err < 0) 2479 return err; 2480 } 2481 2482 if (tb[IFLA_VF_LINK_STATE]) { 2483 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2484 2485 if (ivl->vf >= INT_MAX) 2486 return -EINVAL; 2487 err = -EOPNOTSUPP; 2488 if (ops->ndo_set_vf_link_state) 2489 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2490 ivl->link_state); 2491 if (err < 0) 2492 return err; 2493 } 2494 2495 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2496 struct ifla_vf_rss_query_en *ivrssq_en; 2497 2498 err = -EOPNOTSUPP; 2499 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2500 if (ivrssq_en->vf >= INT_MAX) 2501 return -EINVAL; 2502 if (ops->ndo_set_vf_rss_query_en) 2503 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2504 ivrssq_en->setting); 2505 if (err < 0) 2506 return err; 2507 } 2508 2509 if (tb[IFLA_VF_TRUST]) { 2510 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2511 2512 if (ivt->vf >= INT_MAX) 2513 return -EINVAL; 2514 err = -EOPNOTSUPP; 2515 if (ops->ndo_set_vf_trust) 2516 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2517 if (err < 0) 2518 return err; 2519 } 2520 2521 if (tb[IFLA_VF_IB_NODE_GUID]) { 2522 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2523 2524 if (ivt->vf >= INT_MAX) 2525 return -EINVAL; 2526 if (!ops->ndo_set_vf_guid) 2527 return -EOPNOTSUPP; 2528 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2529 } 2530 2531 if (tb[IFLA_VF_IB_PORT_GUID]) { 2532 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2533 2534 if (ivt->vf >= INT_MAX) 2535 return -EINVAL; 2536 if (!ops->ndo_set_vf_guid) 2537 return -EOPNOTSUPP; 2538 2539 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2540 } 2541 2542 return err; 2543 } 2544 2545 static int do_set_master(struct net_device *dev, int ifindex, 2546 struct netlink_ext_ack *extack) 2547 { 2548 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2549 const struct net_device_ops *ops; 2550 int err; 2551 2552 if 
(upper_dev) { 2553 if (upper_dev->ifindex == ifindex) 2554 return 0; 2555 ops = upper_dev->netdev_ops; 2556 if (ops->ndo_del_slave) { 2557 err = ops->ndo_del_slave(upper_dev, dev); 2558 if (err) 2559 return err; 2560 } else { 2561 return -EOPNOTSUPP; 2562 } 2563 } 2564 2565 if (ifindex) { 2566 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2567 if (!upper_dev) 2568 return -EINVAL; 2569 ops = upper_dev->netdev_ops; 2570 if (ops->ndo_add_slave) { 2571 err = ops->ndo_add_slave(upper_dev, dev, extack); 2572 if (err) 2573 return err; 2574 } else { 2575 return -EOPNOTSUPP; 2576 } 2577 } 2578 return 0; 2579 } 2580 2581 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2582 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2583 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2584 }; 2585 2586 static int do_set_proto_down(struct net_device *dev, 2587 struct nlattr *nl_proto_down, 2588 struct nlattr *nl_proto_down_reason, 2589 struct netlink_ext_ack *extack) 2590 { 2591 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2592 unsigned long mask = 0; 2593 u32 value; 2594 bool proto_down; 2595 int err; 2596 2597 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2598 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2599 return -EOPNOTSUPP; 2600 } 2601 2602 if (nl_proto_down_reason) { 2603 err = nla_parse_nested_deprecated(pdreason, 2604 IFLA_PROTO_DOWN_REASON_MAX, 2605 nl_proto_down_reason, 2606 ifla_proto_down_reason_policy, 2607 NULL); 2608 if (err < 0) 2609 return err; 2610 2611 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2612 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2613 return -EINVAL; 2614 } 2615 2616 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2617 2618 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2619 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2620 2621 dev_change_proto_down_reason(dev, mask, value); 2622 } 2623 2624 if (nl_proto_down) { 2625 proto_down = nla_get_u8(nl_proto_down); 2626 2627 /* Don't turn off protodown if there are active reasons */ 2628 if (!proto_down && dev->proto_down_reason) { 2629 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2630 return -EBUSY; 2631 } 2632 err = dev_change_proto_down(dev, 2633 proto_down); 2634 if (err) 2635 return err; 2636 } 2637 2638 return 0; 2639 } 2640 2641 #define DO_SETLINK_MODIFIED 0x01 2642 /* notify flag means notify + modified. */ 2643 #define DO_SETLINK_NOTIFY 0x03 2644 static int do_setlink(const struct sk_buff *skb, 2645 struct net_device *dev, struct ifinfomsg *ifm, 2646 struct netlink_ext_ack *extack, 2647 struct nlattr **tb, char *ifname, int status) 2648 { 2649 const struct net_device_ops *ops = dev->netdev_ops; 2650 int err; 2651 2652 err = validate_linkmsg(dev, tb, extack); 2653 if (err < 0) 2654 return err; 2655 2656 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2657 const char *pat = ifname && ifname[0] ? 
ifname : NULL; 2658 struct net *net; 2659 int new_ifindex; 2660 2661 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2662 tb, CAP_NET_ADMIN); 2663 if (IS_ERR(net)) { 2664 err = PTR_ERR(net); 2665 goto errout; 2666 } 2667 2668 if (tb[IFLA_NEW_IFINDEX]) 2669 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2670 else 2671 new_ifindex = 0; 2672 2673 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2674 put_net(net); 2675 if (err) 2676 goto errout; 2677 status |= DO_SETLINK_MODIFIED; 2678 } 2679 2680 if (tb[IFLA_MAP]) { 2681 struct rtnl_link_ifmap *u_map; 2682 struct ifmap k_map; 2683 2684 if (!ops->ndo_set_config) { 2685 err = -EOPNOTSUPP; 2686 goto errout; 2687 } 2688 2689 if (!netif_device_present(dev)) { 2690 err = -ENODEV; 2691 goto errout; 2692 } 2693 2694 u_map = nla_data(tb[IFLA_MAP]); 2695 k_map.mem_start = (unsigned long) u_map->mem_start; 2696 k_map.mem_end = (unsigned long) u_map->mem_end; 2697 k_map.base_addr = (unsigned short) u_map->base_addr; 2698 k_map.irq = (unsigned char) u_map->irq; 2699 k_map.dma = (unsigned char) u_map->dma; 2700 k_map.port = (unsigned char) u_map->port; 2701 2702 err = ops->ndo_set_config(dev, &k_map); 2703 if (err < 0) 2704 goto errout; 2705 2706 status |= DO_SETLINK_NOTIFY; 2707 } 2708 2709 if (tb[IFLA_ADDRESS]) { 2710 struct sockaddr *sa; 2711 int len; 2712 2713 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2714 sizeof(*sa)); 2715 sa = kmalloc(len, GFP_KERNEL); 2716 if (!sa) { 2717 err = -ENOMEM; 2718 goto errout; 2719 } 2720 sa->sa_family = dev->type; 2721 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2722 dev->addr_len); 2723 err = dev_set_mac_address_user(dev, sa, extack); 2724 kfree(sa); 2725 if (err) 2726 goto errout; 2727 status |= DO_SETLINK_MODIFIED; 2728 } 2729 2730 if (tb[IFLA_MTU]) { 2731 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2732 if (err < 0) 2733 goto errout; 2734 status |= DO_SETLINK_MODIFIED; 2735 } 2736 2737 if (tb[IFLA_GROUP]) { 2738 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2739 status |= DO_SETLINK_NOTIFY; 2740 } 2741 2742 /* 2743 * Interface selected by interface index but interface 2744 * name provided implies that a name change has been 2745 * requested. 
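 * (If the device was selected by name instead, ifi_index is not positive
 * and no rename is attempted here.)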
2746 */ 2747 if (ifm->ifi_index > 0 && ifname[0]) { 2748 err = dev_change_name(dev, ifname); 2749 if (err < 0) 2750 goto errout; 2751 status |= DO_SETLINK_MODIFIED; 2752 } 2753 2754 if (tb[IFLA_IFALIAS]) { 2755 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2756 nla_len(tb[IFLA_IFALIAS])); 2757 if (err < 0) 2758 goto errout; 2759 status |= DO_SETLINK_NOTIFY; 2760 } 2761 2762 if (tb[IFLA_BROADCAST]) { 2763 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2764 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2765 } 2766 2767 if (ifm->ifi_flags || ifm->ifi_change) { 2768 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2769 extack); 2770 if (err < 0) 2771 goto errout; 2772 } 2773 2774 if (tb[IFLA_MASTER]) { 2775 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2776 if (err) 2777 goto errout; 2778 status |= DO_SETLINK_MODIFIED; 2779 } 2780 2781 if (tb[IFLA_CARRIER]) { 2782 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2783 if (err) 2784 goto errout; 2785 status |= DO_SETLINK_MODIFIED; 2786 } 2787 2788 if (tb[IFLA_TXQLEN]) { 2789 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2790 2791 err = dev_change_tx_queue_len(dev, value); 2792 if (err) 2793 goto errout; 2794 status |= DO_SETLINK_MODIFIED; 2795 } 2796 2797 if (tb[IFLA_GSO_MAX_SIZE]) { 2798 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2799 2800 if (max_size > GSO_MAX_SIZE) { 2801 err = -EINVAL; 2802 goto errout; 2803 } 2804 2805 if (dev->gso_max_size ^ max_size) { 2806 netif_set_gso_max_size(dev, max_size); 2807 status |= DO_SETLINK_MODIFIED; 2808 } 2809 } 2810 2811 if (tb[IFLA_GSO_MAX_SEGS]) { 2812 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2813 2814 if (max_segs > GSO_MAX_SEGS) { 2815 err = -EINVAL; 2816 goto errout; 2817 } 2818 2819 if (dev->gso_max_segs ^ max_segs) { 2820 netif_set_gso_max_segs(dev, max_segs); 2821 status |= DO_SETLINK_MODIFIED; 2822 } 2823 } 2824 2825 if (tb[IFLA_GRO_MAX_SIZE]) { 2826 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2827 2828 if (dev->gro_max_size ^ gro_max_size) { 2829 netif_set_gro_max_size(dev, gro_max_size); 2830 status |= DO_SETLINK_MODIFIED; 2831 } 2832 } 2833 2834 if (tb[IFLA_OPERSTATE]) 2835 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2836 2837 if (tb[IFLA_LINKMODE]) { 2838 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2839 2840 write_lock(&dev_base_lock); 2841 if (dev->link_mode ^ value) 2842 status |= DO_SETLINK_NOTIFY; 2843 dev->link_mode = value; 2844 write_unlock(&dev_base_lock); 2845 } 2846 2847 if (tb[IFLA_VFINFO_LIST]) { 2848 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2849 struct nlattr *attr; 2850 int rem; 2851 2852 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2853 if (nla_type(attr) != IFLA_VF_INFO || 2854 nla_len(attr) < NLA_HDRLEN) { 2855 err = -EINVAL; 2856 goto errout; 2857 } 2858 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2859 attr, 2860 ifla_vf_policy, 2861 NULL); 2862 if (err < 0) 2863 goto errout; 2864 err = do_setvfinfo(dev, vfinfo); 2865 if (err < 0) 2866 goto errout; 2867 status |= DO_SETLINK_NOTIFY; 2868 } 2869 } 2870 err = 0; 2871 2872 if (tb[IFLA_VF_PORTS]) { 2873 struct nlattr *port[IFLA_PORT_MAX+1]; 2874 struct nlattr *attr; 2875 int vf; 2876 int rem; 2877 2878 err = -EOPNOTSUPP; 2879 if (!ops->ndo_set_vf_port) 2880 goto errout; 2881 2882 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2883 if (nla_type(attr) != IFLA_VF_PORT || 2884 nla_len(attr) < NLA_HDRLEN) { 2885 err = -EINVAL; 2886 goto errout; 2887 } 2888 err = 
nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2889 attr, 2890 ifla_port_policy, 2891 NULL); 2892 if (err < 0) 2893 goto errout; 2894 if (!port[IFLA_PORT_VF]) { 2895 err = -EOPNOTSUPP; 2896 goto errout; 2897 } 2898 vf = nla_get_u32(port[IFLA_PORT_VF]); 2899 err = ops->ndo_set_vf_port(dev, vf, port); 2900 if (err < 0) 2901 goto errout; 2902 status |= DO_SETLINK_NOTIFY; 2903 } 2904 } 2905 err = 0; 2906 2907 if (tb[IFLA_PORT_SELF]) { 2908 struct nlattr *port[IFLA_PORT_MAX+1]; 2909 2910 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2911 tb[IFLA_PORT_SELF], 2912 ifla_port_policy, NULL); 2913 if (err < 0) 2914 goto errout; 2915 2916 err = -EOPNOTSUPP; 2917 if (ops->ndo_set_vf_port) 2918 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 2919 if (err < 0) 2920 goto errout; 2921 status |= DO_SETLINK_NOTIFY; 2922 } 2923 2924 if (tb[IFLA_AF_SPEC]) { 2925 struct nlattr *af; 2926 int rem; 2927 2928 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2929 const struct rtnl_af_ops *af_ops; 2930 2931 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 2932 2933 err = af_ops->set_link_af(dev, af, extack); 2934 if (err < 0) 2935 goto errout; 2936 2937 status |= DO_SETLINK_NOTIFY; 2938 } 2939 } 2940 err = 0; 2941 2942 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 2943 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 2944 tb[IFLA_PROTO_DOWN_REASON], extack); 2945 if (err) 2946 goto errout; 2947 status |= DO_SETLINK_NOTIFY; 2948 } 2949 2950 if (tb[IFLA_XDP]) { 2951 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 2952 u32 xdp_flags = 0; 2953 2954 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 2955 tb[IFLA_XDP], 2956 ifla_xdp_policy, NULL); 2957 if (err < 0) 2958 goto errout; 2959 2960 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 2961 err = -EINVAL; 2962 goto errout; 2963 } 2964 2965 if (xdp[IFLA_XDP_FLAGS]) { 2966 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 2967 if (xdp_flags & ~XDP_FLAGS_MASK) { 2968 err = -EINVAL; 2969 goto errout; 2970 } 2971 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 2972 err = -EINVAL; 2973 goto errout; 2974 } 2975 } 2976 2977 if (xdp[IFLA_XDP_FD]) { 2978 int expected_fd = -1; 2979 2980 if (xdp_flags & XDP_FLAGS_REPLACE) { 2981 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 2982 err = -EINVAL; 2983 goto errout; 2984 } 2985 expected_fd = 2986 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 2987 } 2988 2989 err = dev_change_xdp_fd(dev, extack, 2990 nla_get_s32(xdp[IFLA_XDP_FD]), 2991 expected_fd, 2992 xdp_flags); 2993 if (err) 2994 goto errout; 2995 status |= DO_SETLINK_NOTIFY; 2996 } 2997 } 2998 2999 errout: 3000 if (status & DO_SETLINK_MODIFIED) { 3001 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3002 netdev_state_change(dev); 3003 3004 if (err < 0) 3005 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3006 dev->name); 3007 } 3008 3009 return err; 3010 } 3011 3012 static struct net_device *rtnl_dev_get(struct net *net, 3013 struct nlattr *ifname_attr, 3014 struct nlattr *altifname_attr, 3015 char *ifname) 3016 { 3017 char buffer[ALTIFNAMSIZ]; 3018 3019 if (!ifname) { 3020 ifname = buffer; 3021 if (ifname_attr) 3022 nla_strscpy(ifname, ifname_attr, IFNAMSIZ); 3023 else if (altifname_attr) 3024 nla_strscpy(ifname, altifname_attr, ALTIFNAMSIZ); 3025 else 3026 return NULL; 3027 } 3028 3029 return __dev_get_by_name(net, ifname); 3030 } 3031 3032 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3033 struct netlink_ext_ack *extack) 3034 { 3035 struct net *net = sock_net(skb->sk); 3036 struct ifinfomsg *ifm; 3037 struct net_device *dev; 3038 int err; 3039 struct nlattr *tb[IFLA_MAX+1]; 3040 char ifname[IFNAMSIZ]; 3041 3042 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3043 ifla_policy, extack); 3044 if (err < 0) 3045 goto errout; 3046 3047 err = rtnl_ensure_unique_netns(tb, extack, false); 3048 if (err < 0) 3049 goto errout; 3050 3051 if (tb[IFLA_IFNAME]) 3052 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3053 else 3054 ifname[0] = '\0'; 3055 3056 err = -EINVAL; 3057 ifm = nlmsg_data(nlh); 3058 if (ifm->ifi_index > 0) 3059 dev = __dev_get_by_index(net, ifm->ifi_index); 3060 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3061 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); 3062 else 3063 goto errout; 3064 3065 if (dev == NULL) { 3066 err = -ENODEV; 3067 goto errout; 3068 } 3069 3070 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0); 3071 errout: 3072 return err; 3073 } 3074 3075 static int rtnl_group_dellink(const struct net *net, int group) 3076 { 3077 struct net_device *dev, *aux; 3078 LIST_HEAD(list_kill); 3079 bool found = false; 3080 3081 if (!group) 3082 return -EPERM; 3083 3084 for_each_netdev(net, dev) { 3085 if (dev->group == group) { 3086 const struct rtnl_link_ops *ops; 3087 3088 found = true; 3089 ops = dev->rtnl_link_ops; 3090 if (!ops || !ops->dellink) 3091 return -EOPNOTSUPP; 3092 } 3093 } 3094 3095 if (!found) 3096 return -ENODEV; 3097 3098 for_each_netdev_safe(net, dev, aux) { 3099 if (dev->group == group) { 3100 const struct rtnl_link_ops *ops; 3101 3102 ops = dev->rtnl_link_ops; 3103 ops->dellink(dev, &list_kill); 3104 } 3105 } 3106 unregister_netdevice_many(&list_kill); 3107 3108 return 0; 3109 } 3110 3111 int rtnl_delete_link(struct net_device *dev) 3112 { 3113 const struct rtnl_link_ops *ops; 3114 LIST_HEAD(list_kill); 3115 3116 ops = dev->rtnl_link_ops; 3117 if (!ops || !ops->dellink) 3118 return -EOPNOTSUPP; 3119 3120 ops->dellink(dev, &list_kill); 3121 unregister_netdevice_many(&list_kill); 3122 3123 return 0; 3124 } 3125 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3126 3127 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3128 struct netlink_ext_ack *extack) 3129 { 3130 struct net *net = sock_net(skb->sk); 3131 struct net *tgt_net = net; 3132 struct net_device *dev = NULL; 3133 struct ifinfomsg *ifm; 3134 struct nlattr *tb[IFLA_MAX+1]; 3135 int err; 3136 int netnsid = -1; 3137 3138 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3139 ifla_policy, extack); 3140 if (err < 0) 3141 return err; 3142 3143 err = rtnl_ensure_unique_netns(tb, extack, true); 3144 if (err < 0) 3145 return err; 3146 3147 if (tb[IFLA_TARGET_NETNSID]) { 3148 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3149 tgt_net = 
rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3150 if (IS_ERR(tgt_net)) 3151 return PTR_ERR(tgt_net); 3152 } 3153 3154 err = -EINVAL; 3155 ifm = nlmsg_data(nlh); 3156 if (ifm->ifi_index > 0) 3157 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3158 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3159 dev = rtnl_dev_get(net, tb[IFLA_IFNAME], 3160 tb[IFLA_ALT_IFNAME], NULL); 3161 else if (tb[IFLA_GROUP]) 3162 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3163 else 3164 goto out; 3165 3166 if (!dev) { 3167 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0) 3168 err = -ENODEV; 3169 3170 goto out; 3171 } 3172 3173 err = rtnl_delete_link(dev); 3174 3175 out: 3176 if (netnsid >= 0) 3177 put_net(tgt_net); 3178 3179 return err; 3180 } 3181 3182 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) 3183 { 3184 unsigned int old_flags; 3185 int err; 3186 3187 old_flags = dev->flags; 3188 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3189 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3190 NULL); 3191 if (err < 0) 3192 return err; 3193 } 3194 3195 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3196 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); 3197 } else { 3198 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3199 __dev_notify_flags(dev, old_flags, ~0U); 3200 } 3201 return 0; 3202 } 3203 EXPORT_SYMBOL(rtnl_configure_link); 3204 3205 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3206 unsigned char name_assign_type, 3207 const struct rtnl_link_ops *ops, 3208 struct nlattr *tb[], 3209 struct netlink_ext_ack *extack) 3210 { 3211 struct net_device *dev; 3212 unsigned int num_tx_queues = 1; 3213 unsigned int num_rx_queues = 1; 3214 3215 if (tb[IFLA_NUM_TX_QUEUES]) 3216 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3217 else if (ops->get_num_tx_queues) 3218 num_tx_queues = ops->get_num_tx_queues(); 3219 3220 if (tb[IFLA_NUM_RX_QUEUES]) 3221 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3222 else if (ops->get_num_rx_queues) 3223 num_rx_queues = ops->get_num_rx_queues(); 3224 3225 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3226 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3227 return ERR_PTR(-EINVAL); 3228 } 3229 3230 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3231 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3232 return ERR_PTR(-EINVAL); 3233 } 3234 3235 if (ops->alloc) { 3236 dev = ops->alloc(tb, ifname, name_assign_type, 3237 num_tx_queues, num_rx_queues); 3238 if (IS_ERR(dev)) 3239 return dev; 3240 } else { 3241 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3242 name_assign_type, ops->setup, 3243 num_tx_queues, num_rx_queues); 3244 } 3245 3246 if (!dev) 3247 return ERR_PTR(-ENOMEM); 3248 3249 dev_net_set(dev, net); 3250 dev->rtnl_link_ops = ops; 3251 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3252 3253 if (tb[IFLA_MTU]) { 3254 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3255 int err; 3256 3257 err = dev_validate_mtu(dev, mtu, extack); 3258 if (err) { 3259 free_netdev(dev); 3260 return ERR_PTR(err); 3261 } 3262 dev->mtu = mtu; 3263 } 3264 if (tb[IFLA_ADDRESS]) { 3265 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3266 nla_len(tb[IFLA_ADDRESS])); 3267 dev->addr_assign_type = NET_ADDR_SET; 3268 } 3269 if (tb[IFLA_BROADCAST]) 3270 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3271 nla_len(tb[IFLA_BROADCAST])); 3272 if (tb[IFLA_TXQLEN]) 3273 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3274 if (tb[IFLA_OPERSTATE]) 3275 
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3276 if (tb[IFLA_LINKMODE]) 3277 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3278 if (tb[IFLA_GROUP]) 3279 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3280 if (tb[IFLA_GSO_MAX_SIZE]) 3281 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3282 if (tb[IFLA_GSO_MAX_SEGS]) 3283 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3284 if (tb[IFLA_GRO_MAX_SIZE]) 3285 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3286 3287 return dev; 3288 } 3289 EXPORT_SYMBOL(rtnl_create_link); 3290 3291 static int rtnl_group_changelink(const struct sk_buff *skb, 3292 struct net *net, int group, 3293 struct ifinfomsg *ifm, 3294 struct netlink_ext_ack *extack, 3295 struct nlattr **tb) 3296 { 3297 struct net_device *dev, *aux; 3298 int err; 3299 3300 for_each_netdev_safe(net, dev, aux) { 3301 if (dev->group == group) { 3302 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0); 3303 if (err < 0) 3304 return err; 3305 } 3306 } 3307 3308 return 0; 3309 } 3310 3311 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3312 struct nlattr **attr, struct netlink_ext_ack *extack) 3313 { 3314 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3315 unsigned char name_assign_type = NET_NAME_USER; 3316 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3317 const struct rtnl_link_ops *m_ops; 3318 struct net_device *master_dev; 3319 struct net *net = sock_net(skb->sk); 3320 const struct rtnl_link_ops *ops; 3321 struct nlattr *tb[IFLA_MAX + 1]; 3322 struct net *dest_net, *link_net; 3323 struct nlattr **slave_data; 3324 char kind[MODULE_NAME_LEN]; 3325 struct net_device *dev; 3326 struct ifinfomsg *ifm; 3327 char ifname[IFNAMSIZ]; 3328 struct nlattr **data; 3329 int err; 3330 3331 #ifdef CONFIG_MODULES 3332 replay: 3333 #endif 3334 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3335 ifla_policy, extack); 3336 if (err < 0) 3337 return err; 3338 3339 err = rtnl_ensure_unique_netns(tb, extack, false); 3340 if (err < 0) 3341 return err; 3342 3343 if (tb[IFLA_IFNAME]) 3344 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3345 else 3346 ifname[0] = '\0'; 3347 3348 ifm = nlmsg_data(nlh); 3349 if (ifm->ifi_index > 0) 3350 dev = __dev_get_by_index(net, ifm->ifi_index); 3351 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3352 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); 3353 else 3354 dev = NULL; 3355 3356 master_dev = NULL; 3357 m_ops = NULL; 3358 if (dev) { 3359 master_dev = netdev_master_upper_dev_get(dev); 3360 if (master_dev) 3361 m_ops = master_dev->rtnl_link_ops; 3362 } 3363 3364 err = validate_linkmsg(dev, tb, extack); 3365 if (err < 0) 3366 return err; 3367 3368 if (tb[IFLA_LINKINFO]) { 3369 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3370 tb[IFLA_LINKINFO], 3371 ifla_info_policy, NULL); 3372 if (err < 0) 3373 return err; 3374 } else 3375 memset(linkinfo, 0, sizeof(linkinfo)); 3376 3377 if (linkinfo[IFLA_INFO_KIND]) { 3378 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3379 ops = rtnl_link_ops_get(kind); 3380 } else { 3381 kind[0] = '\0'; 3382 ops = NULL; 3383 } 3384 3385 data = NULL; 3386 if (ops) { 3387 if (ops->maxtype > RTNL_MAX_TYPE) 3388 return -EINVAL; 3389 3390 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3391 err = nla_parse_nested_deprecated(attr, ops->maxtype, 3392 linkinfo[IFLA_INFO_DATA], 3393 ops->policy, extack); 3394 if (err < 0) 3395 return err; 3396 data = attr; 3397 } 3398 if (ops->validate) { 3399 err = ops->validate(tb, data, extack); 
3400 if (err < 0) 3401 return err; 3402 } 3403 } 3404 3405 slave_data = NULL; 3406 if (m_ops) { 3407 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3408 return -EINVAL; 3409 3410 if (m_ops->slave_maxtype && 3411 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3412 err = nla_parse_nested_deprecated(slave_attr, 3413 m_ops->slave_maxtype, 3414 linkinfo[IFLA_INFO_SLAVE_DATA], 3415 m_ops->slave_policy, 3416 extack); 3417 if (err < 0) 3418 return err; 3419 slave_data = slave_attr; 3420 } 3421 } 3422 3423 if (dev) { 3424 int status = 0; 3425 3426 if (nlh->nlmsg_flags & NLM_F_EXCL) 3427 return -EEXIST; 3428 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3429 return -EOPNOTSUPP; 3430 3431 if (linkinfo[IFLA_INFO_DATA]) { 3432 if (!ops || ops != dev->rtnl_link_ops || 3433 !ops->changelink) 3434 return -EOPNOTSUPP; 3435 3436 err = ops->changelink(dev, tb, data, extack); 3437 if (err < 0) 3438 return err; 3439 status |= DO_SETLINK_NOTIFY; 3440 } 3441 3442 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3443 if (!m_ops || !m_ops->slave_changelink) 3444 return -EOPNOTSUPP; 3445 3446 err = m_ops->slave_changelink(master_dev, dev, tb, 3447 slave_data, extack); 3448 if (err < 0) 3449 return err; 3450 status |= DO_SETLINK_NOTIFY; 3451 } 3452 3453 return do_setlink(skb, dev, ifm, extack, tb, ifname, status); 3454 } 3455 3456 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3457 if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) 3458 return rtnl_group_changelink(skb, net, 3459 nla_get_u32(tb[IFLA_GROUP]), 3460 ifm, extack, tb); 3461 return -ENODEV; 3462 } 3463 3464 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3465 return -EOPNOTSUPP; 3466 3467 if (!ops) { 3468 #ifdef CONFIG_MODULES 3469 if (kind[0]) { 3470 __rtnl_unlock(); 3471 request_module("rtnl-link-%s", kind); 3472 rtnl_lock(); 3473 ops = rtnl_link_ops_get(kind); 3474 if (ops) 3475 goto replay; 3476 } 3477 #endif 3478 NL_SET_ERR_MSG(extack, "Unknown device type"); 3479 return -EOPNOTSUPP; 3480 } 3481 3482 if (!ops->alloc && !ops->setup) 3483 return -EOPNOTSUPP; 3484 3485 if (!ifname[0]) { 3486 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3487 name_assign_type = NET_NAME_ENUM; 3488 } 3489 3490 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3491 if (IS_ERR(dest_net)) 3492 return PTR_ERR(dest_net); 3493 3494 if (tb[IFLA_LINK_NETNSID]) { 3495 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3496 3497 link_net = get_net_ns_by_id(dest_net, id); 3498 if (!link_net) { 3499 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3500 err = -EINVAL; 3501 goto out; 3502 } 3503 err = -EPERM; 3504 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3505 goto out; 3506 } else { 3507 link_net = NULL; 3508 } 3509 3510 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3511 name_assign_type, ops, tb, extack); 3512 if (IS_ERR(dev)) { 3513 err = PTR_ERR(dev); 3514 goto out; 3515 } 3516 3517 dev->ifindex = ifm->ifi_index; 3518 3519 if (ops->newlink) 3520 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3521 else 3522 err = register_netdevice(dev); 3523 if (err < 0) { 3524 free_netdev(dev); 3525 goto out; 3526 } 3527 3528 err = rtnl_configure_link(dev, ifm); 3529 if (err < 0) 3530 goto out_unregister; 3531 if (link_net) { 3532 err = dev_change_net_namespace(dev, dest_net, ifname); 3533 if (err < 0) 3534 goto out_unregister; 3535 } 3536 if (tb[IFLA_MASTER]) { 3537 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3538 if (err) 3539 goto out_unregister; 3540 } 3541 out: 3542 if (link_net) 3543 put_net(link_net); 3544 put_net(dest_net); 3545 return err; 3546 out_unregister: 3547 if (ops->newlink) { 3548 LIST_HEAD(list_kill); 3549 3550 ops->dellink(dev, &list_kill); 3551 unregister_netdevice_many(&list_kill); 3552 } else { 3553 unregister_netdevice(dev); 3554 } 3555 goto out; 3556 } 3557 3558 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3559 struct netlink_ext_ack *extack) 3560 { 3561 struct nlattr **attr; 3562 int ret; 3563 3564 attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL); 3565 if (!attr) 3566 return -ENOMEM; 3567 3568 ret = __rtnl_newlink(skb, nlh, attr, extack); 3569 kfree(attr); 3570 return ret; 3571 } 3572 3573 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3574 const struct nlmsghdr *nlh, 3575 struct nlattr **tb, 3576 struct netlink_ext_ack *extack) 3577 { 3578 struct ifinfomsg *ifm; 3579 int i, err; 3580 3581 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3582 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3583 return -EINVAL; 3584 } 3585 3586 if (!netlink_strict_get_check(skb)) 3587 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3588 ifla_policy, extack); 3589 3590 ifm = nlmsg_data(nlh); 3591 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3592 ifm->ifi_change) { 3593 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3594 return -EINVAL; 3595 } 3596 3597 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3598 ifla_policy, extack); 3599 if (err) 3600 return err; 3601 3602 for (i = 0; i <= IFLA_MAX; i++) { 3603 if (!tb[i]) 3604 continue; 3605 3606 switch (i) { 3607 case IFLA_IFNAME: 3608 case IFLA_ALT_IFNAME: 3609 case IFLA_EXT_MASK: 3610 case IFLA_TARGET_NETNSID: 3611 break; 3612 default: 3613 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3614 return -EINVAL; 3615 } 3616 } 3617 3618 return 0; 3619 } 3620 3621 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3622 struct netlink_ext_ack *extack) 3623 { 3624 struct net *net = sock_net(skb->sk); 3625 struct net *tgt_net = net; 3626 struct ifinfomsg *ifm; 3627 struct nlattr *tb[IFLA_MAX+1]; 3628 struct net_device *dev = NULL; 3629 struct sk_buff *nskb; 3630 int netnsid = -1; 3631 int err; 3632 u32 ext_filter_mask = 0; 3633 3634 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3635 if (err < 0) 3636 return err; 3637 3638 err = rtnl_ensure_unique_netns(tb, extack, true); 3639 if (err < 0) 3640 return err; 3641 3642 if (tb[IFLA_TARGET_NETNSID]) { 3643 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3644 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3645 if (IS_ERR(tgt_net)) 3646 return PTR_ERR(tgt_net); 3647 } 3648 3649 if (tb[IFLA_EXT_MASK]) 3650 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3651 3652 err = -EINVAL; 3653 ifm = nlmsg_data(nlh); 3654 if (ifm->ifi_index > 0) 3655 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3656 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3657 dev = 
rtnl_dev_get(tgt_net, tb[IFLA_IFNAME], 3658 tb[IFLA_ALT_IFNAME], NULL); 3659 else 3660 goto out; 3661 3662 err = -ENODEV; 3663 if (dev == NULL) 3664 goto out; 3665 3666 err = -ENOBUFS; 3667 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3668 if (nskb == NULL) 3669 goto out; 3670 3671 err = rtnl_fill_ifinfo(nskb, dev, net, 3672 RTM_NEWLINK, NETLINK_CB(skb).portid, 3673 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3674 0, NULL, 0, netnsid, GFP_KERNEL); 3675 if (err < 0) { 3676 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3677 WARN_ON(err == -EMSGSIZE); 3678 kfree_skb(nskb); 3679 } else 3680 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3681 out: 3682 if (netnsid >= 0) 3683 put_net(tgt_net); 3684 3685 return err; 3686 } 3687 3688 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3689 bool *changed, struct netlink_ext_ack *extack) 3690 { 3691 char *alt_ifname; 3692 size_t size; 3693 int err; 3694 3695 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3696 if (err) 3697 return err; 3698 3699 if (cmd == RTM_NEWLINKPROP) { 3700 size = rtnl_prop_list_size(dev); 3701 size += nla_total_size(ALTIFNAMSIZ); 3702 if (size >= U16_MAX) { 3703 NL_SET_ERR_MSG(extack, 3704 "effective property list too long"); 3705 return -EINVAL; 3706 } 3707 } 3708 3709 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3710 if (!alt_ifname) 3711 return -ENOMEM; 3712 3713 if (cmd == RTM_NEWLINKPROP) { 3714 err = netdev_name_node_alt_create(dev, alt_ifname); 3715 if (!err) 3716 alt_ifname = NULL; 3717 } else if (cmd == RTM_DELLINKPROP) { 3718 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3719 } else { 3720 WARN_ON_ONCE(1); 3721 err = -EINVAL; 3722 } 3723 3724 kfree(alt_ifname); 3725 if (!err) 3726 *changed = true; 3727 return err; 3728 } 3729 3730 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3731 struct netlink_ext_ack *extack) 3732 { 3733 struct net *net = sock_net(skb->sk); 3734 struct nlattr *tb[IFLA_MAX + 1]; 3735 struct net_device *dev; 3736 struct ifinfomsg *ifm; 3737 bool changed = false; 3738 struct nlattr *attr; 3739 int err, rem; 3740 3741 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3742 if (err) 3743 return err; 3744 3745 err = rtnl_ensure_unique_netns(tb, extack, true); 3746 if (err) 3747 return err; 3748 3749 ifm = nlmsg_data(nlh); 3750 if (ifm->ifi_index > 0) 3751 dev = __dev_get_by_index(net, ifm->ifi_index); 3752 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3753 dev = rtnl_dev_get(net, tb[IFLA_IFNAME], 3754 tb[IFLA_ALT_IFNAME], NULL); 3755 else 3756 return -EINVAL; 3757 3758 if (!dev) 3759 return -ENODEV; 3760 3761 if (!tb[IFLA_PROP_LIST]) 3762 return 0; 3763 3764 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3765 switch (nla_type(attr)) { 3766 case IFLA_ALT_IFNAME: 3767 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3768 if (err) 3769 return err; 3770 break; 3771 } 3772 } 3773 3774 if (changed) 3775 netdev_state_change(dev); 3776 return 0; 3777 } 3778 3779 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3780 struct netlink_ext_ack *extack) 3781 { 3782 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3783 } 3784 3785 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3786 struct netlink_ext_ack *extack) 3787 { 3788 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3789 } 3790 3791 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3792 { 3793 struct net *net = 
sock_net(skb->sk); 3794 size_t min_ifinfo_dump_size = 0; 3795 struct nlattr *tb[IFLA_MAX+1]; 3796 u32 ext_filter_mask = 0; 3797 struct net_device *dev; 3798 int hdrlen; 3799 3800 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3801 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3802 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3803 3804 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3805 if (tb[IFLA_EXT_MASK]) 3806 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3807 } 3808 3809 if (!ext_filter_mask) 3810 return NLMSG_GOODSIZE; 3811 /* 3812 * traverse the list of net devices and compute the minimum 3813 * buffer size based upon the filter mask. 3814 */ 3815 rcu_read_lock(); 3816 for_each_netdev_rcu(net, dev) { 3817 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3818 if_nlmsg_size(dev, ext_filter_mask)); 3819 } 3820 rcu_read_unlock(); 3821 3822 return nlmsg_total_size(min_ifinfo_dump_size); 3823 } 3824 3825 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3826 { 3827 int idx; 3828 int s_idx = cb->family; 3829 int type = cb->nlh->nlmsg_type - RTM_BASE; 3830 int ret = 0; 3831 3832 if (s_idx == 0) 3833 s_idx = 1; 3834 3835 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3836 struct rtnl_link __rcu **tab; 3837 struct rtnl_link *link; 3838 rtnl_dumpit_func dumpit; 3839 3840 if (idx < s_idx || idx == PF_PACKET) 3841 continue; 3842 3843 if (type < 0 || type >= RTM_NR_MSGTYPES) 3844 continue; 3845 3846 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3847 if (!tab) 3848 continue; 3849 3850 link = rcu_dereference_rtnl(tab[type]); 3851 if (!link) 3852 continue; 3853 3854 dumpit = link->dumpit; 3855 if (!dumpit) 3856 continue; 3857 3858 if (idx > s_idx) { 3859 memset(&cb->args[0], 0, sizeof(cb->args)); 3860 cb->prev_seq = 0; 3861 cb->seq = 0; 3862 } 3863 ret = dumpit(skb, cb); 3864 if (ret) 3865 break; 3866 } 3867 cb->family = idx; 3868 3869 return skb->len ? 
: ret; 3870 } 3871 3872 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 3873 unsigned int change, 3874 u32 event, gfp_t flags, int *new_nsid, 3875 int new_ifindex) 3876 { 3877 struct net *net = dev_net(dev); 3878 struct sk_buff *skb; 3879 int err = -ENOBUFS; 3880 3881 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 3882 if (skb == NULL) 3883 goto errout; 3884 3885 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3886 type, 0, 0, change, 0, 0, event, 3887 new_nsid, new_ifindex, -1, flags); 3888 if (err < 0) { 3889 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3890 WARN_ON(err == -EMSGSIZE); 3891 kfree_skb(skb); 3892 goto errout; 3893 } 3894 return skb; 3895 errout: 3896 if (err < 0) 3897 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 3898 return NULL; 3899 } 3900 3901 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags) 3902 { 3903 struct net *net = dev_net(dev); 3904 3905 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); 3906 } 3907 3908 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 3909 unsigned int change, u32 event, 3910 gfp_t flags, int *new_nsid, int new_ifindex) 3911 { 3912 struct sk_buff *skb; 3913 3914 if (dev->reg_state != NETREG_REGISTERED) 3915 return; 3916 3917 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 3918 new_ifindex); 3919 if (skb) 3920 rtmsg_ifinfo_send(skb, dev, flags); 3921 } 3922 3923 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 3924 gfp_t flags) 3925 { 3926 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3927 NULL, 0); 3928 } 3929 3930 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 3931 gfp_t flags, int *new_nsid, int new_ifindex) 3932 { 3933 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3934 new_nsid, new_ifindex); 3935 } 3936 3937 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 3938 struct net_device *dev, 3939 u8 *addr, u16 vid, u32 pid, u32 seq, 3940 int type, unsigned int flags, 3941 int nlflags, u16 ndm_state) 3942 { 3943 struct nlmsghdr *nlh; 3944 struct ndmsg *ndm; 3945 3946 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 3947 if (!nlh) 3948 return -EMSGSIZE; 3949 3950 ndm = nlmsg_data(nlh); 3951 ndm->ndm_family = AF_BRIDGE; 3952 ndm->ndm_pad1 = 0; 3953 ndm->ndm_pad2 = 0; 3954 ndm->ndm_flags = flags; 3955 ndm->ndm_type = 0; 3956 ndm->ndm_ifindex = dev->ifindex; 3957 ndm->ndm_state = ndm_state; 3958 3959 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 3960 goto nla_put_failure; 3961 if (vid) 3962 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 3963 goto nla_put_failure; 3964 3965 nlmsg_end(skb, nlh); 3966 return 0; 3967 3968 nla_put_failure: 3969 nlmsg_cancel(skb, nlh); 3970 return -EMSGSIZE; 3971 } 3972 3973 static inline size_t rtnl_fdb_nlmsg_size(void) 3974 { 3975 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 3976 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 3977 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 3978 0; 3979 } 3980 3981 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 3982 u16 ndm_state) 3983 { 3984 struct net *net = dev_net(dev); 3985 struct sk_buff *skb; 3986 int err = -ENOBUFS; 3987 3988 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 3989 if (!skb) 3990 goto errout; 3991 3992 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 3993 0, 0, type, NTF_SELF, 0, ndm_state); 3994 if (err < 0) { 3995 kfree_skb(skb); 3996 goto errout; 3997 } 3998 3999 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4000 
return; 4001 errout: 4002 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4003 } 4004 4005 /* 4006 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4007 */ 4008 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4009 struct nlattr *tb[], 4010 struct net_device *dev, 4011 const unsigned char *addr, u16 vid, 4012 u16 flags) 4013 { 4014 int err = -EINVAL; 4015 4016 /* If aging addresses are supported device will need to 4017 * implement its own handler for this. 4018 */ 4019 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4020 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4021 return err; 4022 } 4023 4024 if (vid) { 4025 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4026 return err; 4027 } 4028 4029 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4030 err = dev_uc_add_excl(dev, addr); 4031 else if (is_multicast_ether_addr(addr)) 4032 err = dev_mc_add_excl(dev, addr); 4033 4034 /* Only return duplicate errors if NLM_F_EXCL is set */ 4035 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4036 err = 0; 4037 4038 return err; 4039 } 4040 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4041 4042 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4043 struct netlink_ext_ack *extack) 4044 { 4045 u16 vid = 0; 4046 4047 if (vlan_attr) { 4048 if (nla_len(vlan_attr) != sizeof(u16)) { 4049 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4050 return -EINVAL; 4051 } 4052 4053 vid = nla_get_u16(vlan_attr); 4054 4055 if (!vid || vid >= VLAN_VID_MASK) { 4056 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4057 return -EINVAL; 4058 } 4059 } 4060 *p_vid = vid; 4061 return 0; 4062 } 4063 4064 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4065 struct netlink_ext_ack *extack) 4066 { 4067 struct net *net = sock_net(skb->sk); 4068 struct ndmsg *ndm; 4069 struct nlattr *tb[NDA_MAX+1]; 4070 struct net_device *dev; 4071 u8 *addr; 4072 u16 vid; 4073 int err; 4074 4075 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4076 extack); 4077 if (err < 0) 4078 return err; 4079 4080 ndm = nlmsg_data(nlh); 4081 if (ndm->ndm_ifindex == 0) { 4082 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4083 return -EINVAL; 4084 } 4085 4086 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4087 if (dev == NULL) { 4088 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4089 return -ENODEV; 4090 } 4091 4092 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4093 NL_SET_ERR_MSG(extack, "invalid address"); 4094 return -EINVAL; 4095 } 4096 4097 if (dev->type != ARPHRD_ETHER) { 4098 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4099 return -EINVAL; 4100 } 4101 4102 addr = nla_data(tb[NDA_LLADDR]); 4103 4104 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4105 if (err) 4106 return err; 4107 4108 err = -EOPNOTSUPP; 4109 4110 /* Support fdb on master device the net/bridge default case */ 4111 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4112 netif_is_bridge_port(dev)) { 4113 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4114 const struct net_device_ops *ops = br_dev->netdev_ops; 4115 4116 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4117 nlh->nlmsg_flags, extack); 4118 if (err) 4119 goto out; 4120 else 4121 ndm->ndm_flags &= ~NTF_MASTER; 4122 } 4123 4124 /* Embedded bridge, macvlan, and any other device support */ 4125 if ((ndm->ndm_flags & NTF_SELF)) { 4126 if (dev->netdev_ops->ndo_fdb_add) 4127 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4128 vid, 4129 
nlh->nlmsg_flags, 4130 extack); 4131 else 4132 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4133 nlh->nlmsg_flags); 4134 4135 if (!err) { 4136 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4137 ndm->ndm_state); 4138 ndm->ndm_flags &= ~NTF_SELF; 4139 } 4140 } 4141 out: 4142 return err; 4143 } 4144 4145 /* 4146 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4147 */ 4148 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4149 struct nlattr *tb[], 4150 struct net_device *dev, 4151 const unsigned char *addr, u16 vid) 4152 { 4153 int err = -EINVAL; 4154 4155 /* If aging addresses are supported device will need to 4156 * implement its own handler for this. 4157 */ 4158 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4159 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4160 return err; 4161 } 4162 4163 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4164 err = dev_uc_del(dev, addr); 4165 else if (is_multicast_ether_addr(addr)) 4166 err = dev_mc_del(dev, addr); 4167 4168 return err; 4169 } 4170 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4171 4172 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { 4173 [NDA_VLAN] = { .type = NLA_U16 }, 4174 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 4175 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 4176 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 4177 }; 4178 4179 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4180 struct netlink_ext_ack *extack) 4181 { 4182 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4183 struct net *net = sock_net(skb->sk); 4184 const struct net_device_ops *ops; 4185 struct ndmsg *ndm; 4186 struct nlattr *tb[NDA_MAX+1]; 4187 struct net_device *dev; 4188 __u8 *addr = NULL; 4189 int err; 4190 u16 vid; 4191 4192 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4193 return -EPERM; 4194 4195 if (!del_bulk) { 4196 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4197 NULL, extack); 4198 } else { 4199 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, 4200 fdb_del_bulk_policy, extack); 4201 } 4202 if (err < 0) 4203 return err; 4204 4205 ndm = nlmsg_data(nlh); 4206 if (ndm->ndm_ifindex == 0) { 4207 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4208 return -EINVAL; 4209 } 4210 4211 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4212 if (dev == NULL) { 4213 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4214 return -ENODEV; 4215 } 4216 4217 if (!del_bulk) { 4218 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4219 NL_SET_ERR_MSG(extack, "invalid address"); 4220 return -EINVAL; 4221 } 4222 addr = nla_data(tb[NDA_LLADDR]); 4223 } 4224 4225 if (dev->type != ARPHRD_ETHER) { 4226 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4227 return -EINVAL; 4228 } 4229 4230 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4231 if (err) 4232 return err; 4233 4234 err = -EOPNOTSUPP; 4235 4236 /* Support fdb on master device the net/bridge default case */ 4237 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4238 netif_is_bridge_port(dev)) { 4239 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4240 4241 ops = br_dev->netdev_ops; 4242 if (!del_bulk) { 4243 if (ops->ndo_fdb_del) 4244 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); 4245 } else { 4246 if (ops->ndo_fdb_del_bulk) 4247 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4248 extack); 4249 } 4250 4251 if (err) 4252 goto out; 4253 else 4254 ndm->ndm_flags &= ~NTF_MASTER; 4255 } 4256 4257 /* Embedded bridge, macvlan, and any other device support */ 4258 if 
(ndm->ndm_flags & NTF_SELF) { 4259 ops = dev->netdev_ops; 4260 if (!del_bulk) { 4261 if (ops->ndo_fdb_del) 4262 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); 4263 else 4264 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4265 } else { 4266 /* in case err was cleared by NTF_MASTER call */ 4267 err = -EOPNOTSUPP; 4268 if (ops->ndo_fdb_del_bulk) 4269 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4270 extack); 4271 } 4272 4273 if (!err) { 4274 if (!del_bulk) 4275 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4276 ndm->ndm_state); 4277 ndm->ndm_flags &= ~NTF_SELF; 4278 } 4279 } 4280 out: 4281 return err; 4282 } 4283 4284 static int nlmsg_populate_fdb(struct sk_buff *skb, 4285 struct netlink_callback *cb, 4286 struct net_device *dev, 4287 int *idx, 4288 struct netdev_hw_addr_list *list) 4289 { 4290 struct netdev_hw_addr *ha; 4291 int err; 4292 u32 portid, seq; 4293 4294 portid = NETLINK_CB(cb->skb).portid; 4295 seq = cb->nlh->nlmsg_seq; 4296 4297 list_for_each_entry(ha, &list->list, list) { 4298 if (*idx < cb->args[2]) 4299 goto skip; 4300 4301 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4302 portid, seq, 4303 RTM_NEWNEIGH, NTF_SELF, 4304 NLM_F_MULTI, NUD_PERMANENT); 4305 if (err < 0) 4306 return err; 4307 skip: 4308 *idx += 1; 4309 } 4310 return 0; 4311 } 4312 4313 /** 4314 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4315 * @skb: socket buffer to store message in 4316 * @cb: netlink callback 4317 * @dev: netdevice 4318 * @filter_dev: ignored 4319 * @idx: the number of FDB table entries dumped is added to *@idx 4320 * 4321 * Default netdevice operation to dump the existing unicast address list. 4322 * Returns number of addresses from list put in skb. 4323 */ 4324 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4325 struct netlink_callback *cb, 4326 struct net_device *dev, 4327 struct net_device *filter_dev, 4328 int *idx) 4329 { 4330 int err; 4331 4332 if (dev->type != ARPHRD_ETHER) 4333 return -EINVAL; 4334 4335 netif_addr_lock_bh(dev); 4336 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4337 if (err) 4338 goto out; 4339 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4340 out: 4341 netif_addr_unlock_bh(dev); 4342 return err; 4343 } 4344 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4345 4346 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4347 int *br_idx, int *brport_idx, 4348 struct netlink_ext_ack *extack) 4349 { 4350 struct nlattr *tb[NDA_MAX + 1]; 4351 struct ndmsg *ndm; 4352 int err, i; 4353 4354 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4355 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4356 return -EINVAL; 4357 } 4358 4359 ndm = nlmsg_data(nlh); 4360 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4361 ndm->ndm_flags || ndm->ndm_type) { 4362 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4363 return -EINVAL; 4364 } 4365 4366 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4367 NDA_MAX, NULL, extack); 4368 if (err < 0) 4369 return err; 4370 4371 *brport_idx = ndm->ndm_ifindex; 4372 for (i = 0; i <= NDA_MAX; ++i) { 4373 if (!tb[i]) 4374 continue; 4375 4376 switch (i) { 4377 case NDA_IFINDEX: 4378 if (nla_len(tb[i]) != sizeof(u32)) { 4379 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4380 return -EINVAL; 4381 } 4382 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4383 break; 4384 case NDA_MASTER: 4385 if (nla_len(tb[i]) != sizeof(u32)) { 4386 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4387 return -EINVAL; 
4388 } 4389 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4390 break; 4391 default: 4392 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4393 return -EINVAL; 4394 } 4395 } 4396 4397 return 0; 4398 } 4399 4400 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4401 int *br_idx, int *brport_idx, 4402 struct netlink_ext_ack *extack) 4403 { 4404 struct nlattr *tb[IFLA_MAX+1]; 4405 int err; 4406 4407 /* A hack to preserve kernel<->userspace interface. 4408 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4409 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4410 * So, check for ndmsg with an optional u32 attribute (not used here). 4411 * Fortunately these sizes don't conflict with the size of ifinfomsg 4412 * with an optional attribute. 4413 */ 4414 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4415 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4416 nla_attr_size(sizeof(u32)))) { 4417 struct ifinfomsg *ifm; 4418 4419 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4420 tb, IFLA_MAX, ifla_policy, 4421 extack); 4422 if (err < 0) { 4423 return -EINVAL; 4424 } else if (err == 0) { 4425 if (tb[IFLA_MASTER]) 4426 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4427 } 4428 4429 ifm = nlmsg_data(nlh); 4430 *brport_idx = ifm->ifi_index; 4431 } 4432 return 0; 4433 } 4434 4435 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4436 { 4437 struct net_device *dev; 4438 struct net_device *br_dev = NULL; 4439 const struct net_device_ops *ops = NULL; 4440 const struct net_device_ops *cops = NULL; 4441 struct net *net = sock_net(skb->sk); 4442 struct hlist_head *head; 4443 int brport_idx = 0; 4444 int br_idx = 0; 4445 int h, s_h; 4446 int idx = 0, s_idx; 4447 int err = 0; 4448 int fidx = 0; 4449 4450 if (cb->strict_check) 4451 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4452 cb->extack); 4453 else 4454 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4455 cb->extack); 4456 if (err < 0) 4457 return err; 4458 4459 if (br_idx) { 4460 br_dev = __dev_get_by_index(net, br_idx); 4461 if (!br_dev) 4462 return -ENODEV; 4463 4464 ops = br_dev->netdev_ops; 4465 } 4466 4467 s_h = cb->args[0]; 4468 s_idx = cb->args[1]; 4469 4470 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4471 idx = 0; 4472 head = &net->dev_index_head[h]; 4473 hlist_for_each_entry(dev, head, index_hlist) { 4474 4475 if (brport_idx && (dev->ifindex != brport_idx)) 4476 continue; 4477 4478 if (!br_idx) { /* user did not specify a specific bridge */ 4479 if (netif_is_bridge_port(dev)) { 4480 br_dev = netdev_master_upper_dev_get(dev); 4481 cops = br_dev->netdev_ops; 4482 } 4483 } else { 4484 if (dev != br_dev && 4485 !netif_is_bridge_port(dev)) 4486 continue; 4487 4488 if (br_dev != netdev_master_upper_dev_get(dev) && 4489 !netif_is_bridge_master(dev)) 4490 continue; 4491 cops = ops; 4492 } 4493 4494 if (idx < s_idx) 4495 goto cont; 4496 4497 if (netif_is_bridge_port(dev)) { 4498 if (cops && cops->ndo_fdb_dump) { 4499 err = cops->ndo_fdb_dump(skb, cb, 4500 br_dev, dev, 4501 &fidx); 4502 if (err == -EMSGSIZE) 4503 goto out; 4504 } 4505 } 4506 4507 if (dev->netdev_ops->ndo_fdb_dump) 4508 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4509 dev, NULL, 4510 &fidx); 4511 else 4512 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4513 &fidx); 4514 if (err == -EMSGSIZE) 4515 goto out; 4516 4517 cops = NULL; 4518 4519 /* reset fdb offset to 0 for rest of the interfaces */ 4520 cb->args[2] = 0; 4521 fidx = 0; 4522 cont: 4523 idx++; 4524 } 4525 } 4526 4527 
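	/* Dump resume state lives in cb->args[]: args[0] is the dev_index_head
	 * hash bucket, args[1] the device position within that bucket, and
	 * args[2] the FDB entry offset inside the device currently being
	 * dumped (consumed by nlmsg_populate_fdb() above).
	 */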
out: 4528 cb->args[0] = h; 4529 cb->args[1] = idx; 4530 cb->args[2] = fidx; 4531 4532 return skb->len; 4533 } 4534 4535 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4536 struct nlattr **tb, u8 *ndm_flags, 4537 int *br_idx, int *brport_idx, u8 **addr, 4538 u16 *vid, struct netlink_ext_ack *extack) 4539 { 4540 struct ndmsg *ndm; 4541 int err, i; 4542 4543 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4544 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4545 return -EINVAL; 4546 } 4547 4548 ndm = nlmsg_data(nlh); 4549 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4550 ndm->ndm_type) { 4551 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4552 return -EINVAL; 4553 } 4554 4555 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4556 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4557 return -EINVAL; 4558 } 4559 4560 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4561 NDA_MAX, nda_policy, extack); 4562 if (err < 0) 4563 return err; 4564 4565 *ndm_flags = ndm->ndm_flags; 4566 *brport_idx = ndm->ndm_ifindex; 4567 for (i = 0; i <= NDA_MAX; ++i) { 4568 if (!tb[i]) 4569 continue; 4570 4571 switch (i) { 4572 case NDA_MASTER: 4573 *br_idx = nla_get_u32(tb[i]); 4574 break; 4575 case NDA_LLADDR: 4576 if (nla_len(tb[i]) != ETH_ALEN) { 4577 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4578 return -EINVAL; 4579 } 4580 *addr = nla_data(tb[i]); 4581 break; 4582 case NDA_VLAN: 4583 err = fdb_vid_parse(tb[i], vid, extack); 4584 if (err) 4585 return err; 4586 break; 4587 case NDA_VNI: 4588 break; 4589 default: 4590 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4591 return -EINVAL; 4592 } 4593 } 4594 4595 return 0; 4596 } 4597 4598 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4599 struct netlink_ext_ack *extack) 4600 { 4601 struct net_device *dev = NULL, *br_dev = NULL; 4602 const struct net_device_ops *ops = NULL; 4603 struct net *net = sock_net(in_skb->sk); 4604 struct nlattr *tb[NDA_MAX + 1]; 4605 struct sk_buff *skb; 4606 int brport_idx = 0; 4607 u8 ndm_flags = 0; 4608 int br_idx = 0; 4609 u8 *addr = NULL; 4610 u16 vid = 0; 4611 int err; 4612 4613 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4614 &brport_idx, &addr, &vid, extack); 4615 if (err < 0) 4616 return err; 4617 4618 if (!addr) { 4619 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4620 return -EINVAL; 4621 } 4622 4623 if (brport_idx) { 4624 dev = __dev_get_by_index(net, brport_idx); 4625 if (!dev) { 4626 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4627 return -ENODEV; 4628 } 4629 } 4630 4631 if (br_idx) { 4632 if (dev) { 4633 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4634 return -EINVAL; 4635 } 4636 4637 br_dev = __dev_get_by_index(net, br_idx); 4638 if (!br_dev) { 4639 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4640 return -EINVAL; 4641 } 4642 ops = br_dev->netdev_ops; 4643 } 4644 4645 if (dev) { 4646 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4647 if (!netif_is_bridge_port(dev)) { 4648 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4649 return -EINVAL; 4650 } 4651 br_dev = netdev_master_upper_dev_get(dev); 4652 if (!br_dev) { 4653 NL_SET_ERR_MSG(extack, "Master of device not found"); 4654 return -EINVAL; 4655 } 4656 ops = br_dev->netdev_ops; 4657 } else { 4658 if (!(ndm_flags & NTF_SELF)) { 4659 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4660 return -EINVAL; 4661 } 4662 ops = dev->netdev_ops; 
4663 } 4664 } 4665 4666 if (!br_dev && !dev) { 4667 NL_SET_ERR_MSG(extack, "No device specified"); 4668 return -ENODEV; 4669 } 4670 4671 if (!ops || !ops->ndo_fdb_get) { 4672 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4673 return -EOPNOTSUPP; 4674 } 4675 4676 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4677 if (!skb) 4678 return -ENOBUFS; 4679 4680 if (br_dev) 4681 dev = br_dev; 4682 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4683 NETLINK_CB(in_skb).portid, 4684 nlh->nlmsg_seq, extack); 4685 if (err) 4686 goto out; 4687 4688 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4689 out: 4690 kfree_skb(skb); 4691 return err; 4692 } 4693 4694 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4695 unsigned int attrnum, unsigned int flag) 4696 { 4697 if (mask & flag) 4698 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4699 return 0; 4700 } 4701 4702 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4703 struct net_device *dev, u16 mode, 4704 u32 flags, u32 mask, int nlflags, 4705 u32 filter_mask, 4706 int (*vlan_fill)(struct sk_buff *skb, 4707 struct net_device *dev, 4708 u32 filter_mask)) 4709 { 4710 struct nlmsghdr *nlh; 4711 struct ifinfomsg *ifm; 4712 struct nlattr *br_afspec; 4713 struct nlattr *protinfo; 4714 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4715 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4716 int err = 0; 4717 4718 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4719 if (nlh == NULL) 4720 return -EMSGSIZE; 4721 4722 ifm = nlmsg_data(nlh); 4723 ifm->ifi_family = AF_BRIDGE; 4724 ifm->__ifi_pad = 0; 4725 ifm->ifi_type = dev->type; 4726 ifm->ifi_index = dev->ifindex; 4727 ifm->ifi_flags = dev_get_flags(dev); 4728 ifm->ifi_change = 0; 4729 4730 4731 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4732 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4733 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4734 (br_dev && 4735 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4736 (dev->addr_len && 4737 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4738 (dev->ifindex != dev_get_iflink(dev) && 4739 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4740 goto nla_put_failure; 4741 4742 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4743 if (!br_afspec) 4744 goto nla_put_failure; 4745 4746 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4747 nla_nest_cancel(skb, br_afspec); 4748 goto nla_put_failure; 4749 } 4750 4751 if (mode != BRIDGE_MODE_UNDEF) { 4752 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4753 nla_nest_cancel(skb, br_afspec); 4754 goto nla_put_failure; 4755 } 4756 } 4757 if (vlan_fill) { 4758 err = vlan_fill(skb, dev, filter_mask); 4759 if (err) { 4760 nla_nest_cancel(skb, br_afspec); 4761 goto nla_put_failure; 4762 } 4763 } 4764 nla_nest_end(skb, br_afspec); 4765 4766 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4767 if (!protinfo) 4768 goto nla_put_failure; 4769 4770 if (brport_nla_put_flag(skb, flags, mask, 4771 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4772 brport_nla_put_flag(skb, flags, mask, 4773 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4774 brport_nla_put_flag(skb, flags, mask, 4775 IFLA_BRPORT_FAST_LEAVE, 4776 BR_MULTICAST_FAST_LEAVE) || 4777 brport_nla_put_flag(skb, flags, mask, 4778 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4779 brport_nla_put_flag(skb, flags, mask, 4780 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4781 brport_nla_put_flag(skb, flags, mask, 4782 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 
4783 brport_nla_put_flag(skb, flags, mask, 4784 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4785 brport_nla_put_flag(skb, flags, mask, 4786 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4787 brport_nla_put_flag(skb, flags, mask, 4788 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4789 brport_nla_put_flag(skb, flags, mask, 4790 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4791 nla_nest_cancel(skb, protinfo); 4792 goto nla_put_failure; 4793 } 4794 4795 nla_nest_end(skb, protinfo); 4796 4797 nlmsg_end(skb, nlh); 4798 return 0; 4799 nla_put_failure: 4800 nlmsg_cancel(skb, nlh); 4801 return err ? err : -EMSGSIZE; 4802 } 4803 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4804 4805 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4806 bool strict_check, u32 *filter_mask, 4807 struct netlink_ext_ack *extack) 4808 { 4809 struct nlattr *tb[IFLA_MAX+1]; 4810 int err, i; 4811 4812 if (strict_check) { 4813 struct ifinfomsg *ifm; 4814 4815 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4816 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4817 return -EINVAL; 4818 } 4819 4820 ifm = nlmsg_data(nlh); 4821 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4822 ifm->ifi_change || ifm->ifi_index) { 4823 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4824 return -EINVAL; 4825 } 4826 4827 err = nlmsg_parse_deprecated_strict(nlh, 4828 sizeof(struct ifinfomsg), 4829 tb, IFLA_MAX, ifla_policy, 4830 extack); 4831 } else { 4832 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4833 tb, IFLA_MAX, ifla_policy, 4834 extack); 4835 } 4836 if (err < 0) 4837 return err; 4838 4839 /* new attributes should only be added with strict checking */ 4840 for (i = 0; i <= IFLA_MAX; ++i) { 4841 if (!tb[i]) 4842 continue; 4843 4844 switch (i) { 4845 case IFLA_EXT_MASK: 4846 *filter_mask = nla_get_u32(tb[i]); 4847 break; 4848 default: 4849 if (strict_check) { 4850 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4851 return -EINVAL; 4852 } 4853 } 4854 } 4855 4856 return 0; 4857 } 4858 4859 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 4860 { 4861 const struct nlmsghdr *nlh = cb->nlh; 4862 struct net *net = sock_net(skb->sk); 4863 struct net_device *dev; 4864 int idx = 0; 4865 u32 portid = NETLINK_CB(cb->skb).portid; 4866 u32 seq = nlh->nlmsg_seq; 4867 u32 filter_mask = 0; 4868 int err; 4869 4870 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 4871 cb->extack); 4872 if (err < 0 && cb->strict_check) 4873 return err; 4874 4875 rcu_read_lock(); 4876 for_each_netdev_rcu(net, dev) { 4877 const struct net_device_ops *ops = dev->netdev_ops; 4878 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4879 4880 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 4881 if (idx >= cb->args[0]) { 4882 err = br_dev->netdev_ops->ndo_bridge_getlink( 4883 skb, portid, seq, dev, 4884 filter_mask, NLM_F_MULTI); 4885 if (err < 0 && err != -EOPNOTSUPP) { 4886 if (likely(skb->len)) 4887 break; 4888 4889 goto out_err; 4890 } 4891 } 4892 idx++; 4893 } 4894 4895 if (ops->ndo_bridge_getlink) { 4896 if (idx >= cb->args[0]) { 4897 err = ops->ndo_bridge_getlink(skb, portid, 4898 seq, dev, 4899 filter_mask, 4900 NLM_F_MULTI); 4901 if (err < 0 && err != -EOPNOTSUPP) { 4902 if (likely(skb->len)) 4903 break; 4904 4905 goto out_err; 4906 } 4907 } 4908 idx++; 4909 } 4910 } 4911 err = skb->len; 4912 out_err: 4913 rcu_read_unlock(); 4914 cb->args[0] = idx; 4915 4916 return err; 4917 } 4918 4919 static inline 
size_t bridge_nlmsg_size(void) 4920 { 4921 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 4922 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 4923 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 4924 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 4925 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 4926 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 4927 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 4928 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 4929 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 4930 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 4931 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 4932 } 4933 4934 static int rtnl_bridge_notify(struct net_device *dev) 4935 { 4936 struct net *net = dev_net(dev); 4937 struct sk_buff *skb; 4938 int err = -EOPNOTSUPP; 4939 4940 if (!dev->netdev_ops->ndo_bridge_getlink) 4941 return 0; 4942 4943 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 4944 if (!skb) { 4945 err = -ENOMEM; 4946 goto errout; 4947 } 4948 4949 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 4950 if (err < 0) 4951 goto errout; 4952 4953 /* Notification info is only filled for bridge ports, not the bridge 4954 * device itself. Therefore, a zero notification length is valid and 4955 * should not result in an error. 4956 */ 4957 if (!skb->len) 4958 goto errout; 4959 4960 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 4961 return 0; 4962 errout: 4963 WARN_ON(err == -EMSGSIZE); 4964 kfree_skb(skb); 4965 if (err) 4966 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4967 return err; 4968 } 4969 4970 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 4971 struct netlink_ext_ack *extack) 4972 { 4973 struct net *net = sock_net(skb->sk); 4974 struct ifinfomsg *ifm; 4975 struct net_device *dev; 4976 struct nlattr *br_spec, *attr = NULL; 4977 int rem, err = -EOPNOTSUPP; 4978 u16 flags = 0; 4979 bool have_flags = false; 4980 4981 if (nlmsg_len(nlh) < sizeof(*ifm)) 4982 return -EINVAL; 4983 4984 ifm = nlmsg_data(nlh); 4985 if (ifm->ifi_family != AF_BRIDGE) 4986 return -EPFNOSUPPORT; 4987 4988 dev = __dev_get_by_index(net, ifm->ifi_index); 4989 if (!dev) { 4990 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4991 return -ENODEV; 4992 } 4993 4994 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4995 if (br_spec) { 4996 nla_for_each_nested(attr, br_spec, rem) { 4997 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 4998 if (nla_len(attr) < sizeof(flags)) 4999 return -EINVAL; 5000 5001 have_flags = true; 5002 flags = nla_get_u16(attr); 5003 break; 5004 } 5005 } 5006 } 5007 5008 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5009 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5010 5011 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5012 err = -EOPNOTSUPP; 5013 goto out; 5014 } 5015 5016 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5017 extack); 5018 if (err) 5019 goto out; 5020 5021 flags &= ~BRIDGE_FLAGS_MASTER; 5022 } 5023 5024 if ((flags & BRIDGE_FLAGS_SELF)) { 5025 if (!dev->netdev_ops->ndo_bridge_setlink) 5026 err = -EOPNOTSUPP; 5027 else 5028 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5029 flags, 5030 extack); 5031 if (!err) { 5032 flags &= ~BRIDGE_FLAGS_SELF; 5033 5034 /* Generate event to notify upper layer of bridge 5035 * change 5036 */ 5037 err = rtnl_bridge_notify(dev); 5038 } 5039 } 5040 5041 if (have_flags) 5042 memcpy(nla_data(attr), &flags, sizeof(flags)); 5043 out: 5044 return err; 5045 } 5046 5047 static int rtnl_bridge_dellink(struct 
sk_buff *skb, struct nlmsghdr *nlh, 5048 struct netlink_ext_ack *extack) 5049 { 5050 struct net *net = sock_net(skb->sk); 5051 struct ifinfomsg *ifm; 5052 struct net_device *dev; 5053 struct nlattr *br_spec, *attr = NULL; 5054 int rem, err = -EOPNOTSUPP; 5055 u16 flags = 0; 5056 bool have_flags = false; 5057 5058 if (nlmsg_len(nlh) < sizeof(*ifm)) 5059 return -EINVAL; 5060 5061 ifm = nlmsg_data(nlh); 5062 if (ifm->ifi_family != AF_BRIDGE) 5063 return -EPFNOSUPPORT; 5064 5065 dev = __dev_get_by_index(net, ifm->ifi_index); 5066 if (!dev) { 5067 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5068 return -ENODEV; 5069 } 5070 5071 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5072 if (br_spec) { 5073 nla_for_each_nested(attr, br_spec, rem) { 5074 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5075 if (nla_len(attr) < sizeof(flags)) 5076 return -EINVAL; 5077 5078 have_flags = true; 5079 flags = nla_get_u16(attr); 5080 break; 5081 } 5082 } 5083 } 5084 5085 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5086 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5087 5088 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5089 err = -EOPNOTSUPP; 5090 goto out; 5091 } 5092 5093 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5094 if (err) 5095 goto out; 5096 5097 flags &= ~BRIDGE_FLAGS_MASTER; 5098 } 5099 5100 if ((flags & BRIDGE_FLAGS_SELF)) { 5101 if (!dev->netdev_ops->ndo_bridge_dellink) 5102 err = -EOPNOTSUPP; 5103 else 5104 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5105 flags); 5106 5107 if (!err) { 5108 flags &= ~BRIDGE_FLAGS_SELF; 5109 5110 /* Generate event to notify upper layer of bridge 5111 * change 5112 */ 5113 err = rtnl_bridge_notify(dev); 5114 } 5115 } 5116 5117 if (have_flags) 5118 memcpy(nla_data(attr), &flags, sizeof(flags)); 5119 out: 5120 return err; 5121 } 5122 5123 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5124 { 5125 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5126 (!idxattr || idxattr == attrid); 5127 } 5128 5129 static bool 5130 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5131 { 5132 return dev->netdev_ops && 5133 dev->netdev_ops->ndo_has_offload_stats && 5134 dev->netdev_ops->ndo_get_offload_stats && 5135 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5136 } 5137 5138 static unsigned int 5139 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5140 { 5141 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5142 sizeof(struct rtnl_link_stats64) : 0; 5143 } 5144 5145 static int 5146 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5147 struct sk_buff *skb) 5148 { 5149 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5150 struct nlattr *attr = NULL; 5151 void *attr_data; 5152 int err; 5153 5154 if (!size) 5155 return -ENODATA; 5156 5157 attr = nla_reserve_64bit(skb, attr_id, size, 5158 IFLA_OFFLOAD_XSTATS_UNSPEC); 5159 if (!attr) 5160 return -EMSGSIZE; 5161 5162 attr_data = nla_data(attr); 5163 memset(attr_data, 0, size); 5164 5165 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5166 if (err) 5167 return err; 5168 5169 return 0; 5170 } 5171 5172 static unsigned int 5173 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5174 enum netdev_offload_xstats_type type) 5175 { 5176 bool enabled = netdev_offload_xstats_enabled(dev, type); 5177 5178 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5179 } 5180 5181 struct rtnl_offload_xstats_request_used { 5182 bool request; 5183 bool used; 5184 }; 5185 5186 static int 5187 rtnl_offload_xstats_get_stats(struct net_device *dev, 5188 enum netdev_offload_xstats_type type, 5189 struct rtnl_offload_xstats_request_used *ru, 5190 struct rtnl_hw_stats64 *stats, 5191 struct netlink_ext_ack *extack) 5192 { 5193 bool request; 5194 bool used; 5195 int err; 5196 5197 request = netdev_offload_xstats_enabled(dev, type); 5198 if (!request) { 5199 used = false; 5200 goto out; 5201 } 5202 5203 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5204 if (err) 5205 return err; 5206 5207 out: 5208 if (ru) { 5209 ru->request = request; 5210 ru->used = used; 5211 } 5212 return 0; 5213 } 5214 5215 static int 5216 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5217 struct rtnl_offload_xstats_request_used *ru) 5218 { 5219 struct nlattr *nest; 5220 5221 nest = nla_nest_start(skb, attr_id); 5222 if (!nest) 5223 return -EMSGSIZE; 5224 5225 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5226 goto nla_put_failure; 5227 5228 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5229 goto nla_put_failure; 5230 5231 nla_nest_end(skb, nest); 5232 return 0; 5233 5234 nla_put_failure: 5235 nla_nest_cancel(skb, nest); 5236 return -EMSGSIZE; 5237 } 5238 5239 static int 5240 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5241 struct netlink_ext_ack *extack) 5242 { 5243 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5244 struct rtnl_offload_xstats_request_used ru_l3; 5245 struct nlattr *nest; 5246 int err; 5247 5248 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5249 if (err) 5250 return err; 5251 5252 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5253 if (!nest) 5254 return -EMSGSIZE; 5255 5256 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5257 IFLA_OFFLOAD_XSTATS_L3_STATS, 5258 &ru_l3)) 5259 goto nla_put_failure; 5260 5261 nla_nest_end(skb, nest); 5262 return 0; 5263 5264 nla_put_failure: 5265 nla_nest_cancel(skb, nest); 5266 return -EMSGSIZE; 5267 } 5268 5269 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5270 int *prividx, u32 off_filter_mask, 5271 struct netlink_ext_ack *extack) 5272 { 5273 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5274 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5275 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5276 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5277 bool have_data = false; 5278 int err; 5279 5280 if (*prividx <= attr_id_cpu_hit && 5281 (off_filter_mask & 5282 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5283 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5284 if (!err) { 5285 have_data = true; 5286 } else if (err != -ENODATA) { 5287 *prividx = attr_id_cpu_hit; 5288 return err; 5289 } 5290 } 5291 5292 if (*prividx <= attr_id_hw_s_info && 5293 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5294 *prividx = attr_id_hw_s_info; 5295 5296 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5297 if (err) 5298 return err; 5299 5300 have_data = true; 5301 *prividx = 0; 5302 } 5303 5304 if (*prividx <= attr_id_l3_stats && 5305 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5306 unsigned int size_l3; 5307 struct nlattr *attr; 5308 5309 *prividx = attr_id_l3_stats; 5310 5311 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5312 if (!size_l3) 5313 goto skip_l3_stats; 5314 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5315 IFLA_OFFLOAD_XSTATS_UNSPEC); 5316 if (!attr) 5317 return -EMSGSIZE; 5318 5319 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5320 nla_data(attr), extack); 5321 if (err) 5322 return err; 5323 5324 have_data = true; 5325 skip_l3_stats: 5326 *prividx = 0; 5327 } 5328 5329 if (!have_data) 5330 return -ENODATA; 5331 5332 *prividx = 0; 5333 return 0; 5334 } 5335 5336 static unsigned int 5337 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5338 enum netdev_offload_xstats_type type) 5339 { 5340 bool enabled = netdev_offload_xstats_enabled(dev, type); 5341 5342 return nla_total_size(0) + 5343 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5344 nla_total_size(sizeof(u8)) + 5345 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5346 (enabled ? nla_total_size(sizeof(u8)) : 0) + 5347 0; 5348 } 5349 5350 static unsigned int 5351 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5352 { 5353 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5354 5355 return nla_total_size(0) + 5356 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5357 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5358 0; 5359 } 5360 5361 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5362 u32 off_filter_mask) 5363 { 5364 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5365 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5366 int nla_size = 0; 5367 int size; 5368 5369 if (off_filter_mask & 5370 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5371 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5372 nla_size += nla_total_size_64bit(size); 5373 } 5374 5375 if (off_filter_mask & 5376 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5377 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5378 5379 if (off_filter_mask & 5380 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5381 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5382 nla_size += nla_total_size_64bit(size); 5383 } 5384 5385 if (nla_size != 0) 5386 nla_size += nla_total_size(0); 5387 5388 return nla_size; 5389 } 5390 5391 struct rtnl_stats_dump_filters { 5392 /* mask[0] filters outer attributes. Then individual nests have their 5393 * filtering mask at the index of the nested attribute. 
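 * For example, mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] selects which
 * IFLA_OFFLOAD_XSTATS_* attributes get filled inside that nest.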
5394 */ 5395 u32 mask[IFLA_STATS_MAX + 1]; 5396 }; 5397 5398 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5399 int type, u32 pid, u32 seq, u32 change, 5400 unsigned int flags, 5401 const struct rtnl_stats_dump_filters *filters, 5402 int *idxattr, int *prividx, 5403 struct netlink_ext_ack *extack) 5404 { 5405 unsigned int filter_mask = filters->mask[0]; 5406 struct if_stats_msg *ifsm; 5407 struct nlmsghdr *nlh; 5408 struct nlattr *attr; 5409 int s_prividx = *prividx; 5410 int err; 5411 5412 ASSERT_RTNL(); 5413 5414 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5415 if (!nlh) 5416 return -EMSGSIZE; 5417 5418 ifsm = nlmsg_data(nlh); 5419 ifsm->family = PF_UNSPEC; 5420 ifsm->pad1 = 0; 5421 ifsm->pad2 = 0; 5422 ifsm->ifindex = dev->ifindex; 5423 ifsm->filter_mask = filter_mask; 5424 5425 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5426 struct rtnl_link_stats64 *sp; 5427 5428 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5429 sizeof(struct rtnl_link_stats64), 5430 IFLA_STATS_UNSPEC); 5431 if (!attr) { 5432 err = -EMSGSIZE; 5433 goto nla_put_failure; 5434 } 5435 5436 sp = nla_data(attr); 5437 dev_get_stats(dev, sp); 5438 } 5439 5440 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5441 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5442 5443 if (ops && ops->fill_linkxstats) { 5444 *idxattr = IFLA_STATS_LINK_XSTATS; 5445 attr = nla_nest_start_noflag(skb, 5446 IFLA_STATS_LINK_XSTATS); 5447 if (!attr) { 5448 err = -EMSGSIZE; 5449 goto nla_put_failure; 5450 } 5451 5452 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5453 nla_nest_end(skb, attr); 5454 if (err) 5455 goto nla_put_failure; 5456 *idxattr = 0; 5457 } 5458 } 5459 5460 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5461 *idxattr)) { 5462 const struct rtnl_link_ops *ops = NULL; 5463 const struct net_device *master; 5464 5465 master = netdev_master_upper_dev_get(dev); 5466 if (master) 5467 ops = master->rtnl_link_ops; 5468 if (ops && ops->fill_linkxstats) { 5469 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5470 attr = nla_nest_start_noflag(skb, 5471 IFLA_STATS_LINK_XSTATS_SLAVE); 5472 if (!attr) { 5473 err = -EMSGSIZE; 5474 goto nla_put_failure; 5475 } 5476 5477 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5478 nla_nest_end(skb, attr); 5479 if (err) 5480 goto nla_put_failure; 5481 *idxattr = 0; 5482 } 5483 } 5484 5485 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5486 *idxattr)) { 5487 u32 off_filter_mask; 5488 5489 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5490 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5491 attr = nla_nest_start_noflag(skb, 5492 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5493 if (!attr) { 5494 err = -EMSGSIZE; 5495 goto nla_put_failure; 5496 } 5497 5498 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5499 off_filter_mask, extack); 5500 if (err == -ENODATA) 5501 nla_nest_cancel(skb, attr); 5502 else 5503 nla_nest_end(skb, attr); 5504 5505 if (err && err != -ENODATA) 5506 goto nla_put_failure; 5507 *idxattr = 0; 5508 } 5509 5510 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5511 struct rtnl_af_ops *af_ops; 5512 5513 *idxattr = IFLA_STATS_AF_SPEC; 5514 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5515 if (!attr) { 5516 err = -EMSGSIZE; 5517 goto nla_put_failure; 5518 } 5519 5520 rcu_read_lock(); 5521 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5522 if (af_ops->fill_stats_af) { 5523 struct nlattr *af; 5524 5525 af = 
nla_nest_start_noflag(skb, 5526 af_ops->family); 5527 if (!af) { 5528 rcu_read_unlock(); 5529 err = -EMSGSIZE; 5530 goto nla_put_failure; 5531 } 5532 err = af_ops->fill_stats_af(skb, dev); 5533 5534 if (err == -ENODATA) { 5535 nla_nest_cancel(skb, af); 5536 } else if (err < 0) { 5537 rcu_read_unlock(); 5538 goto nla_put_failure; 5539 } 5540 5541 nla_nest_end(skb, af); 5542 } 5543 } 5544 rcu_read_unlock(); 5545 5546 nla_nest_end(skb, attr); 5547 5548 *idxattr = 0; 5549 } 5550 5551 nlmsg_end(skb, nlh); 5552 5553 return 0; 5554 5555 nla_put_failure: 5556 /* not a multi message or no progress mean a real error */ 5557 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5558 nlmsg_cancel(skb, nlh); 5559 else 5560 nlmsg_end(skb, nlh); 5561 5562 return err; 5563 } 5564 5565 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5566 const struct rtnl_stats_dump_filters *filters) 5567 { 5568 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5569 unsigned int filter_mask = filters->mask[0]; 5570 5571 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5572 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5573 5574 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5575 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5576 int attr = IFLA_STATS_LINK_XSTATS; 5577 5578 if (ops && ops->get_linkxstats_size) { 5579 size += nla_total_size(ops->get_linkxstats_size(dev, 5580 attr)); 5581 /* for IFLA_STATS_LINK_XSTATS */ 5582 size += nla_total_size(0); 5583 } 5584 } 5585 5586 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5587 struct net_device *_dev = (struct net_device *)dev; 5588 const struct rtnl_link_ops *ops = NULL; 5589 const struct net_device *master; 5590 5591 /* netdev_master_upper_dev_get can't take const */ 5592 master = netdev_master_upper_dev_get(_dev); 5593 if (master) 5594 ops = master->rtnl_link_ops; 5595 if (ops && ops->get_linkxstats_size) { 5596 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5597 5598 size += nla_total_size(ops->get_linkxstats_size(dev, 5599 attr)); 5600 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5601 size += nla_total_size(0); 5602 } 5603 } 5604 5605 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5606 u32 off_filter_mask; 5607 5608 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5609 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5610 } 5611 5612 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5613 struct rtnl_af_ops *af_ops; 5614 5615 /* for IFLA_STATS_AF_SPEC */ 5616 size += nla_total_size(0); 5617 5618 rcu_read_lock(); 5619 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5620 if (af_ops->get_stats_af_size) { 5621 size += nla_total_size( 5622 af_ops->get_stats_af_size(dev)); 5623 5624 /* for AF_* */ 5625 size += nla_total_size(0); 5626 } 5627 } 5628 rcu_read_unlock(); 5629 } 5630 5631 return size; 5632 } 5633 5634 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5635 5636 static const struct nla_policy 5637 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5638 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5639 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5640 }; 5641 5642 static const struct nla_policy 5643 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5644 [IFLA_STATS_GET_FILTERS] = 5645 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5646 }; 5647 5648 static const struct nla_policy 5649 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5650 
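	/* boolean: enable or disable NETDEV_OFFLOAD_XSTATS_TYPE_L3 collection
	 * (see rtnl_stats_set() below)
	 */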
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5651 }; 5652 5653 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5654 struct rtnl_stats_dump_filters *filters, 5655 struct netlink_ext_ack *extack) 5656 { 5657 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5658 int err; 5659 int at; 5660 5661 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5662 rtnl_stats_get_policy_filters, extack); 5663 if (err < 0) 5664 return err; 5665 5666 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5667 if (tb[at]) { 5668 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5669 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5670 return -EINVAL; 5671 } 5672 filters->mask[at] = nla_get_u32(tb[at]); 5673 } 5674 } 5675 5676 return 0; 5677 } 5678 5679 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5680 u32 filter_mask, 5681 struct rtnl_stats_dump_filters *filters, 5682 struct netlink_ext_ack *extack) 5683 { 5684 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5685 int err; 5686 int i; 5687 5688 filters->mask[0] = filter_mask; 5689 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5690 filters->mask[i] = -1U; 5691 5692 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5693 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5694 if (err < 0) 5695 return err; 5696 5697 if (tb[IFLA_STATS_GET_FILTERS]) { 5698 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5699 filters, extack); 5700 if (err) 5701 return err; 5702 } 5703 5704 return 0; 5705 } 5706 5707 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5708 bool is_dump, struct netlink_ext_ack *extack) 5709 { 5710 struct if_stats_msg *ifsm; 5711 5712 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5713 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5714 return -EINVAL; 5715 } 5716 5717 if (!strict_check) 5718 return 0; 5719 5720 ifsm = nlmsg_data(nlh); 5721 5722 /* only requests using strict checks can pass data to influence 5723 * the dump. The legacy exception is filter_mask. 
5724 */ 5725 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5726 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5727 return -EINVAL; 5728 } 5729 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5730 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5731 return -EINVAL; 5732 } 5733 5734 return 0; 5735 } 5736 5737 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5738 struct netlink_ext_ack *extack) 5739 { 5740 struct rtnl_stats_dump_filters filters; 5741 struct net *net = sock_net(skb->sk); 5742 struct net_device *dev = NULL; 5743 int idxattr = 0, prividx = 0; 5744 struct if_stats_msg *ifsm; 5745 struct sk_buff *nskb; 5746 int err; 5747 5748 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5749 false, extack); 5750 if (err) 5751 return err; 5752 5753 ifsm = nlmsg_data(nlh); 5754 if (ifsm->ifindex > 0) 5755 dev = __dev_get_by_index(net, ifsm->ifindex); 5756 else 5757 return -EINVAL; 5758 5759 if (!dev) 5760 return -ENODEV; 5761 5762 if (!ifsm->filter_mask) { 5763 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5764 return -EINVAL; 5765 } 5766 5767 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5768 if (err) 5769 return err; 5770 5771 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5772 if (!nskb) 5773 return -ENOBUFS; 5774 5775 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5776 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5777 0, &filters, &idxattr, &prividx, extack); 5778 if (err < 0) { 5779 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5780 WARN_ON(err == -EMSGSIZE); 5781 kfree_skb(nskb); 5782 } else { 5783 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5784 } 5785 5786 return err; 5787 } 5788 5789 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5790 { 5791 struct netlink_ext_ack *extack = cb->extack; 5792 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5793 struct rtnl_stats_dump_filters filters; 5794 struct net *net = sock_net(skb->sk); 5795 unsigned int flags = NLM_F_MULTI; 5796 struct if_stats_msg *ifsm; 5797 struct hlist_head *head; 5798 struct net_device *dev; 5799 int idx = 0; 5800 5801 s_h = cb->args[0]; 5802 s_idx = cb->args[1]; 5803 s_idxattr = cb->args[2]; 5804 s_prividx = cb->args[3]; 5805 5806 cb->seq = net->dev_base_seq; 5807 5808 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5809 if (err) 5810 return err; 5811 5812 ifsm = nlmsg_data(cb->nlh); 5813 if (!ifsm->filter_mask) { 5814 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5815 return -EINVAL; 5816 } 5817 5818 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5819 extack); 5820 if (err) 5821 return err; 5822 5823 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5824 idx = 0; 5825 head = &net->dev_index_head[h]; 5826 hlist_for_each_entry(dev, head, index_hlist) { 5827 if (idx < s_idx) 5828 goto cont; 5829 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5830 NETLINK_CB(cb->skb).portid, 5831 cb->nlh->nlmsg_seq, 0, 5832 flags, &filters, 5833 &s_idxattr, &s_prividx, 5834 extack); 5835 /* If we ran out of room on the first message, 5836 * we're in trouble 5837 */ 5838 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5839 5840 if (err < 0) 5841 goto out; 5842 s_prividx = 0; 5843 s_idxattr = 0; 5844 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5845 cont: 5846 idx++; 5847 } 5848 } 5849 out: 5850 cb->args[3] = s_prividx; 5851 cb->args[2] = s_idxattr; 
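	/* args[3]/args[2] resume inside a partially filled stats message;
	 * args[1]/args[0] resume at the right device and hash bucket.
	 */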
5852 cb->args[1] = idx; 5853 cb->args[0] = h; 5854 5855 return skb->len; 5856 } 5857 5858 void rtnl_offload_xstats_notify(struct net_device *dev) 5859 { 5860 struct rtnl_stats_dump_filters response_filters = {}; 5861 struct net *net = dev_net(dev); 5862 int idxattr = 0, prividx = 0; 5863 struct sk_buff *skb; 5864 int err = -ENOBUFS; 5865 5866 ASSERT_RTNL(); 5867 5868 response_filters.mask[0] |= 5869 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 5870 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 5871 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5872 5873 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 5874 GFP_KERNEL); 5875 if (!skb) 5876 goto errout; 5877 5878 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 5879 &response_filters, &idxattr, &prividx, NULL); 5880 if (err < 0) { 5881 kfree_skb(skb); 5882 goto errout; 5883 } 5884 5885 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 5886 return; 5887 5888 errout: 5889 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 5890 } 5891 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 5892 5893 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 5894 struct netlink_ext_ack *extack) 5895 { 5896 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5897 struct rtnl_stats_dump_filters response_filters = {}; 5898 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5899 struct net *net = sock_net(skb->sk); 5900 struct net_device *dev = NULL; 5901 struct if_stats_msg *ifsm; 5902 bool notify = false; 5903 int err; 5904 5905 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5906 false, extack); 5907 if (err) 5908 return err; 5909 5910 ifsm = nlmsg_data(nlh); 5911 if (ifsm->family != AF_UNSPEC) { 5912 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 5913 return -EINVAL; 5914 } 5915 5916 if (ifsm->ifindex > 0) 5917 dev = __dev_get_by_index(net, ifsm->ifindex); 5918 else 5919 return -EINVAL; 5920 5921 if (!dev) 5922 return -ENODEV; 5923 5924 if (ifsm->filter_mask) { 5925 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 5926 return -EINVAL; 5927 } 5928 5929 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 5930 ifla_stats_set_policy, extack); 5931 if (err < 0) 5932 return err; 5933 5934 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 5935 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 5936 5937 if (req) 5938 err = netdev_offload_xstats_enable(dev, t_l3, extack); 5939 else 5940 err = netdev_offload_xstats_disable(dev, t_l3); 5941 5942 if (!err) 5943 notify = true; 5944 else if (err != -EALREADY) 5945 return err; 5946 5947 response_filters.mask[0] |= 5948 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 5949 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 5950 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5951 } 5952 5953 if (notify) 5954 rtnl_offload_xstats_notify(dev); 5955 5956 return 0; 5957 } 5958 5959 /* Process one rtnetlink message. 
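 *
 * Requests are looked up in the per-family rtnl_msg_handlers table:
 * GET requests carrying NLM_F_DUMP are handed to netlink_dump_start(),
 * all other requests go to the registered doit() handler, under the
 * rtnl_mutex unless the handler was registered with
 * RTNL_FLAG_DOIT_UNLOCKED.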
*/ 5960 5961 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 5962 struct netlink_ext_ack *extack) 5963 { 5964 struct net *net = sock_net(skb->sk); 5965 struct rtnl_link *link; 5966 enum rtnl_kinds kind; 5967 struct module *owner; 5968 int err = -EOPNOTSUPP; 5969 rtnl_doit_func doit; 5970 unsigned int flags; 5971 int family; 5972 int type; 5973 5974 type = nlh->nlmsg_type; 5975 if (type > RTM_MAX) 5976 return -EOPNOTSUPP; 5977 5978 type -= RTM_BASE; 5979 5980 /* All the messages must have at least 1 byte length */ 5981 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 5982 return 0; 5983 5984 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 5985 kind = rtnl_msgtype_kind(type); 5986 5987 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN)) 5988 return -EPERM; 5989 5990 rcu_read_lock(); 5991 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) { 5992 struct sock *rtnl; 5993 rtnl_dumpit_func dumpit; 5994 u32 min_dump_alloc = 0; 5995 5996 link = rtnl_get_link(family, type); 5997 if (!link || !link->dumpit) { 5998 family = PF_UNSPEC; 5999 link = rtnl_get_link(family, type); 6000 if (!link || !link->dumpit) 6001 goto err_unlock; 6002 } 6003 owner = link->owner; 6004 dumpit = link->dumpit; 6005 6006 if (type == RTM_GETLINK - RTM_BASE) 6007 min_dump_alloc = rtnl_calcit(skb, nlh); 6008 6009 err = 0; 6010 /* need to do this before rcu_read_unlock() */ 6011 if (!try_module_get(owner)) 6012 err = -EPROTONOSUPPORT; 6013 6014 rcu_read_unlock(); 6015 6016 rtnl = net->rtnl; 6017 if (err == 0) { 6018 struct netlink_dump_control c = { 6019 .dump = dumpit, 6020 .min_dump_alloc = min_dump_alloc, 6021 .module = owner, 6022 }; 6023 err = netlink_dump_start(rtnl, skb, nlh, &c); 6024 /* netlink_dump_start() will keep a reference on 6025 * module if dump is still in progress. 
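			 * (it takes its own reference via the
			 * netlink_dump_control .module field), so it is safe
			 * to drop the reference taken by try_module_get()
			 * right away.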
6026 */ 6027 module_put(owner); 6028 } 6029 return err; 6030 } 6031 6032 link = rtnl_get_link(family, type); 6033 if (!link || !link->doit) { 6034 family = PF_UNSPEC; 6035 link = rtnl_get_link(PF_UNSPEC, type); 6036 if (!link || !link->doit) 6037 goto out_unlock; 6038 } 6039 6040 owner = link->owner; 6041 if (!try_module_get(owner)) { 6042 err = -EPROTONOSUPPORT; 6043 goto out_unlock; 6044 } 6045 6046 flags = link->flags; 6047 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && 6048 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { 6049 NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); 6050 goto err_unlock; 6051 } 6052 6053 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 6054 doit = link->doit; 6055 rcu_read_unlock(); 6056 if (doit) 6057 err = doit(skb, nlh, extack); 6058 module_put(owner); 6059 return err; 6060 } 6061 rcu_read_unlock(); 6062 6063 rtnl_lock(); 6064 link = rtnl_get_link(family, type); 6065 if (link && link->doit) 6066 err = link->doit(skb, nlh, extack); 6067 rtnl_unlock(); 6068 6069 module_put(owner); 6070 6071 return err; 6072 6073 out_unlock: 6074 rcu_read_unlock(); 6075 return err; 6076 6077 err_unlock: 6078 rcu_read_unlock(); 6079 return -EOPNOTSUPP; 6080 } 6081 6082 static void rtnetlink_rcv(struct sk_buff *skb) 6083 { 6084 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 6085 } 6086 6087 static int rtnetlink_bind(struct net *net, int group) 6088 { 6089 switch (group) { 6090 case RTNLGRP_IPV4_MROUTE_R: 6091 case RTNLGRP_IPV6_MROUTE_R: 6092 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 6093 return -EPERM; 6094 break; 6095 } 6096 return 0; 6097 } 6098 6099 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 6100 { 6101 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6102 6103 switch (event) { 6104 case NETDEV_REBOOT: 6105 case NETDEV_CHANGEMTU: 6106 case NETDEV_CHANGEADDR: 6107 case NETDEV_CHANGENAME: 6108 case NETDEV_FEAT_CHANGE: 6109 case NETDEV_BONDING_FAILOVER: 6110 case NETDEV_POST_TYPE_CHANGE: 6111 case NETDEV_NOTIFY_PEERS: 6112 case NETDEV_CHANGEUPPER: 6113 case NETDEV_RESEND_IGMP: 6114 case NETDEV_CHANGEINFODATA: 6115 case NETDEV_CHANGELOWERSTATE: 6116 case NETDEV_CHANGE_TX_QUEUE_LEN: 6117 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 6118 GFP_KERNEL, NULL, 0); 6119 break; 6120 default: 6121 break; 6122 } 6123 return NOTIFY_DONE; 6124 } 6125 6126 static struct notifier_block rtnetlink_dev_notifier = { 6127 .notifier_call = rtnetlink_event, 6128 }; 6129 6130 6131 static int __net_init rtnetlink_net_init(struct net *net) 6132 { 6133 struct sock *sk; 6134 struct netlink_kernel_cfg cfg = { 6135 .groups = RTNLGRP_MAX, 6136 .input = rtnetlink_rcv, 6137 .cb_mutex = &rtnl_mutex, 6138 .flags = NL_CFG_F_NONROOT_RECV, 6139 .bind = rtnetlink_bind, 6140 }; 6141 6142 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 6143 if (!sk) 6144 return -ENOMEM; 6145 net->rtnl = sk; 6146 return 0; 6147 } 6148 6149 static void __net_exit rtnetlink_net_exit(struct net *net) 6150 { 6151 netlink_kernel_release(net->rtnl); 6152 net->rtnl = NULL; 6153 } 6154 6155 static struct pernet_operations rtnetlink_net_ops = { 6156 .init = rtnetlink_net_init, 6157 .exit = rtnetlink_net_exit, 6158 }; 6159 6160 void __init rtnetlink_init(void) 6161 { 6162 if (register_pernet_subsys(&rtnetlink_net_ops)) 6163 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 6164 6165 register_netdevice_notifier(&rtnetlink_dev_notifier); 6166 6167 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 6168 rtnl_dump_ifinfo, 0); 6169 rtnl_register(PF_UNSPEC, 
RTM_SETLINK, rtnl_setlink, NULL, 0); 6170 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); 6171 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); 6172 6173 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); 6174 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); 6175 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); 6176 6177 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0); 6178 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0); 6179 6180 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); 6181 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 6182 RTNL_FLAG_BULK_DEL_SUPPORTED); 6183 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); 6184 6185 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); 6186 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); 6187 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); 6188 6189 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 6190 0); 6191 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0); 6192 } 6193
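
/* Illustrative userspace sketch (not kernel code): a minimal RTM_GETSTATS
 * request as handled by rtnl_stats_get() above.  It assumes an interface
 * named "eth0" exists and omits all error handling; structure layouts come
 * from <linux/if_link.h>.
 *
 *	#include <linux/if_link.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct if_stats_msg ifsm;
 *		} req = {
 *			.nlh = {
 *				.nlmsg_len   = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
 *				.nlmsg_type  = RTM_GETSTATS,
 *				.nlmsg_flags = NLM_F_REQUEST,
 *			},
 *			.ifsm = {
 *				.family      = AF_UNSPEC,
 *				.ifindex     = if_nametoindex("eth0"),
 *				.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *			},
 *		};
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		send(fd, &req, req.nlh.nlmsg_len, 0);
 *		// the RTM_NEWSTATS reply carries IFLA_STATS_LINK_64 with a
 *		// struct rtnl_link_stats64 payload; read it with recv()
 *		close(fd);
 *		return 0;
 *	}
 */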