// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
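/*
 * Usage sketch (hypothetical, for illustration only): a caller that drops
 * a chain of skbs while holding RTNL can hand them to rtnl_kfree_skbs()
 * and let rtnl_unlock() -> netdev_run_todo() -> __rtnl_unlock() free them
 * outside the lock. "my_drop_list" is an assumption, not kernel API; the
 * skbs are linked through their ->next pointers as rtnl_kfree_skbs expects.
 *
 *	static void my_drop_list(struct sk_buff *head)
 *	{
 *		struct sk_buff *tail = head;
 *
 *		ASSERT_RTNL();
 *		while (tail && tail->next)
 *			tail = tail->next;	// find end of chain
 *		rtnl_kfree_skbs(head, tail);	// freed after rtnl_unlock()
 *	}
 */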
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 * // list not empty now
	 * // because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *     wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
void __rtnl_net_lock(struct net *net)
{
	ASSERT_RTNL();

	mutex_lock(&net->rtnl_mutex);
}
EXPORT_SYMBOL(__rtnl_net_lock);

void __rtnl_net_unlock(struct net *net)
{
	ASSERT_RTNL();

	mutex_unlock(&net->rtnl_mutex);
}
EXPORT_SYMBOL(__rtnl_net_unlock);

void rtnl_net_lock(struct net *net)
{
	rtnl_lock();
	__rtnl_net_lock(net);
}
EXPORT_SYMBOL(rtnl_net_lock);

void rtnl_net_unlock(struct net *net)
{
	__rtnl_net_unlock(net);
	rtnl_unlock();
}
EXPORT_SYMBOL(rtnl_net_unlock);

static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
{
	if (net_eq(net_a, net_b))
		return 0;

	/* always init_net first */
	if (net_eq(net_a, &init_net))
		return -1;

	if (net_eq(net_b, &init_net))
		return 1;

	/* otherwise lock in ascending order */
	return net_a < net_b ? -1 : 1;
}

int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b)
{
	const struct net *net_a, *net_b;

	net_a = container_of(a, struct net, rtnl_mutex.dep_map);
	net_b = container_of(b, struct net, rtnl_mutex.dep_map);

	return rtnl_net_cmp_locks(net_a, net_b);
}

bool rtnl_net_is_locked(struct net *net)
{
	return rtnl_is_locked() && mutex_is_locked(&net->rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_net_is_locked);

bool lockdep_rtnl_net_is_held(struct net *net)
{
	return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_net_is_held);
#endif

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
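/*
 * Usage sketch (hypothetical): a module registers a doit/dumpit pair for
 * one protocol family and message type from its init function. The names
 * "my_doit"/"my_dumpit" and the PF_BRIDGE/RTM_GETLINK pairing are purely
 * illustrative; pick the family and message type your handler serves.
 *
 *	static int __init my_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_BRIDGE,
 *					    RTM_GETLINK, my_doit,
 *					    my_dumpit, 0);
 *	}
 */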
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
{
	const struct rtnl_msg_handler *handler;
	int i, err;

	for (i = 0, handler = handlers; i < n; i++, handler++) {
		err = rtnl_register_internal(handler->owner, handler->protocol,
					     handler->msgtype, handler->doit,
					     handler->dumpit, handler->flags);
		if (err) {
			__rtnl_unregister_many(handlers, i);
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(__rtnl_register_many);

void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
{
	const struct rtnl_msg_handler *handler;
	int i;

	for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
		rtnl_unregister(handler->protocol, handler->msgtype);
}
EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
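/*
 * Usage sketch (hypothetical): callers typically describe their handlers
 * in a const table and register them in one shot. The rtnl_register_many()
 * wrapper around __rtnl_register_many() is assumed to be available from
 * net/rtnetlink.h; the table contents below are illustrative only.
 *
 *	static const struct rtnl_msg_handler my_handlers[] = {
 *		{ .msgtype = RTM_NEWVLAN, .doit = my_new_doit },
 *		{ .msgtype = RTM_GETVLAN, .dumpit = my_dumpit },
 *	};
 *
 *	err = rtnl_register_many(my_handlers);
 *	// on failure, the helper has already rolled back the
 *	// entries it managed to register
 */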
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
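/*
 * Usage sketch (hypothetical): a minimal virtual-device driver registers
 * its link ops once at module init. The "my" names are illustrative and
 * only .kind/.setup are shown; real drivers fill in more callbacks
 * (newlink, changelink, policy, maxtype, ...).
 *
 *	static struct rtnl_link_ops my_link_ops __read_mostly = {
 *		.kind	= "mydev",
 *		.setup	= my_setup,	// void my_setup(struct net_device *)
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return rtnl_link_register(&my_link_ops);
 *	}
 */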
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) +	/* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);	/* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
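/*
 * Usage sketch (hypothetical): an address family registers its per-AF
 * link-attribute callbacks once at init so its data appears inside
 * IFLA_AF_SPEC of link dumps. The callback names are illustrative.
 *
 *	static struct rtnl_af_ops my_af_ops __read_mostly = {
 *		.family			= AF_INET6,
 *		.fill_link_af		= my_fill_link_af,
 *		.get_link_af_size	= my_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&my_af_ops);
 */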
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
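/*
 * Usage sketch (hypothetical): a route dump function emits its metrics
 * array as an RTA_METRICS nest right alongside the other route
 * attributes. The fib_info-style caller ("fi") and the surrounding
 * nla_put_failure label are assumptions; only the call itself is the
 * API defined above.
 *
 *	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 *		goto nla_put_failure;
 */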
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

void netdev_set_operstate(struct net_device *dev, int newstate)
{
	unsigned int old = READ_ONCE(dev->operstate);

	do {
		if (old == newstate)
			return;
	} while (!try_cmpxchg(&dev->operstate, &old, newstate));

	netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = READ_ONCE(dev->operstate);

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	netdev_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
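/*
 * Worked example for the ifi_change semantics above (values hypothetical):
 * with current device flags 0x1003, a request carrying ifi_flags == 0 and
 * ifi_change == IFF_UP (0x1) clears only IFF_UP:
 *
 *	(0 & 0x1) | (0x1003 & ~0x1) == 0x1002
 *
 * A legacy request with ifi_change == 0 is treated as "change everything",
 * so the result is exactly ifi_flags.
 */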
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);

		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	unsigned int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
		cnt++;
	rcu_read_unlock();

	if (!cnt)
		return 0;

	return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
}
static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	/* Assume dev->proto_down_reason is not zero. */
	size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */

	size += dpll_netdev_pin_handle_size(dev);

	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + rtnl_dpll_pin_size(dev)
	       + nla_total_size(8)  /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
	       + 0;
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       u32 ext_filter_mask)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		return -EMSGSIZE;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
		memset(&vf_stats, 0, sizeof(vf_stats));
		if (dev->netdev_ops->ndo_get_vf_stats)
			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
							  &vf_stats);
		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
		if (!vfstats)
			goto nla_put_vf_failure;
		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
			nla_nest_cancel(skb, vfstats);
			goto nla_put_vf_failure;
		}
		nla_nest_end(skb, vfstats);
	}
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
			nla_nest_cancel(skb, vfinfo);
			return -EMSGSIZE;
		}
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = READ_ONCE(dev->mem_start);
	map.mem_end = READ_ONCE(dev->mem_end);
	map.base_addr = READ_ONCE(dev->base_addr);
	map.irq = READ_ONCE(dev->irq);
	map.dma = READ_ONCE(dev->dma);
	map.port = READ_ONCE(dev->if_port);

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;
	u32 res = 0;

	rcu_read_lock();
	generic_xdp_prog = rcu_dereference(dev->xdp_prog);
	if (generic_xdp_prog)
		res = generic_xdp_prog->aux->id;
	rcu_read_unlock();

	return res;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER,
				  READ_ONCE(upper_dev->ifindex));

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int iflink = dev_get_iflink(dev);

	if (force || READ_ONCE(dev->ifindex) != iflink)
		return nla_put_u32(skb, IFLA_LINK, iflink);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}
/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
		goto nla_put_failure;

	preason = READ_ONCE(dev->proto_down_reason);
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_dpll_pin(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct nlattr *dpll_pin_nest;
	int ret;

	dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
	if (!dpll_pin_nest)
		return -EMSGSIZE;

	ret = dpll_netdev_add_pin_handle(skb, dev);
	if (ret < 0)
		goto nest_cancel;

	nla_nest_end(skb, dpll_pin_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, dpll_pin_nest);
	return ret;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	char devname[IFNAMSIZ];
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = READ_ONCE(dev->type);
	ifm->ifi_index = READ_ONCE(dev->ifindex);
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	netdev_copy_name(dev, devname);
	if (nla_put_string(skb, IFLA_IFNAME, devname))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? READ_ONCE(dev->operstate) :
					    IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
	    nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
	    nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
			READ_ONCE(dev->num_tx_queues)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
			READ_ONCE(dev->gso_max_segs)) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
			READ_ONCE(dev->gso_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
			READ_ONCE(dev->gro_max_size)) ||
	    nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gso_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
			READ_ONCE(dev->gro_ipv4_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
			READ_ONCE(dev->tso_max_size)) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
			READ_ONCE(dev->tso_max_segs)) ||
	    nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON,
			 READ_ONCE(dev->max_pacing_offload_horizon)) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
			READ_ONCE(dev->num_rx_queues)) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
		goto nla_put_failure_rcu;
	qdisc = rcu_dereference(dev->qdisc);
	if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
		goto nla_put_failure_rcu;
nla_put_failure_rcu; 2045 if (rtnl_fill_link_af(skb, dev, ext_filter_mask)) 2046 goto nla_put_failure_rcu; 2047 if (rtnl_fill_link_ifmap(skb, dev)) 2048 goto nla_put_failure_rcu; 2049 if (rtnl_fill_prop_list(skb, dev)) 2050 goto nla_put_failure_rcu; 2051 rcu_read_unlock(); 2052 2053 if (dev->dev.parent && 2054 nla_put_string(skb, IFLA_PARENT_DEV_NAME, 2055 dev_name(dev->dev.parent))) 2056 goto nla_put_failure; 2057 2058 if (dev->dev.parent && dev->dev.parent->bus && 2059 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME, 2060 dev->dev.parent->bus->name)) 2061 goto nla_put_failure; 2062 2063 if (rtnl_fill_devlink_port(skb, dev)) 2064 goto nla_put_failure; 2065 2066 if (rtnl_fill_dpll_pin(skb, dev)) 2067 goto nla_put_failure; 2068 2069 nlmsg_end(skb, nlh); 2070 return 0; 2071 2072 nla_put_failure_rcu: 2073 rcu_read_unlock(); 2074 nla_put_failure: 2075 nlmsg_cancel(skb, nlh); 2076 return -EMSGSIZE; 2077 } 2078 2079 static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 2080 [IFLA_UNSPEC] = { .strict_start_type = IFLA_DPLL_PIN }, 2081 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 2082 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 2083 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 2084 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 2085 [IFLA_MTU] = { .type = NLA_U32 }, 2086 [IFLA_LINK] = { .type = NLA_U32 }, 2087 [IFLA_MASTER] = { .type = NLA_U32 }, 2088 [IFLA_CARRIER] = { .type = NLA_U8 }, 2089 [IFLA_TXQLEN] = { .type = NLA_U32 }, 2090 [IFLA_WEIGHT] = { .type = NLA_U32 }, 2091 [IFLA_OPERSTATE] = { .type = NLA_U8 }, 2092 [IFLA_LINKMODE] = { .type = NLA_U8 }, 2093 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 2094 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 2095 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 2096 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to 2097 * allow 0-length string (needed to remove an alias). 2098 */ 2099 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, 2100 [IFLA_VFINFO_LIST] = {. 
type = NLA_NESTED }, 2101 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 2102 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 2103 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 2104 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 2105 [IFLA_PROMISCUITY] = { .type = NLA_U32 }, 2106 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, 2107 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, 2108 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, 2109 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, 2110 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 2111 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ 2112 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, 2113 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 2114 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 2115 [IFLA_XDP] = { .type = NLA_NESTED }, 2116 [IFLA_EVENT] = { .type = NLA_U32 }, 2117 [IFLA_GROUP] = { .type = NLA_U32 }, 2118 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 }, 2119 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, 2120 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, 2121 [IFLA_MIN_MTU] = { .type = NLA_U32 }, 2122 [IFLA_MAX_MTU] = { .type = NLA_U32 }, 2123 [IFLA_PROP_LIST] = { .type = NLA_NESTED }, 2124 [IFLA_ALT_IFNAME] = { .type = NLA_STRING, 2125 .len = ALTIFNAMSIZ - 1 }, 2126 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, 2127 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, 2128 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 2129 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, 2130 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, 2131 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, 2132 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, 2133 [IFLA_ALLMULTI] = { .type = NLA_REJECT }, 2134 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2135 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, 2136 }; 2137 2138 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 2139 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 2140 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 2141 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING }, 2142 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 2143 }; 2144 2145 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 2146 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 2147 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT }, 2148 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 2149 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED }, 2150 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 2151 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 2152 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 2153 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 2154 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 2155 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 2156 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 2157 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2158 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 2159 }; 2160 2161 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 2162 [IFLA_PORT_VF] = { .type = NLA_U32 }, 2163 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 2164 .len = PORT_PROFILE_MAX }, 2165 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2166 .len = PORT_UUID_MAX }, 2167 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2168 .len = PORT_UUID_MAX }, 2169 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2170 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2171 2172 /* Unused, but we need to keep it here since user space could 2173 * fill it. 
It's also broken with regard to NLA_BINARY use in 2174 * combination with structs. 2175 */ 2176 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2177 .len = sizeof(struct ifla_port_vsi) }, 2178 }; 2179 2180 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2181 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2182 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2183 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2184 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2185 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2186 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2187 }; 2188 2189 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2190 { 2191 const struct rtnl_link_ops *ops = NULL; 2192 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2193 2194 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2195 return NULL; 2196 2197 if (linfo[IFLA_INFO_KIND]) { 2198 char kind[MODULE_NAME_LEN]; 2199 2200 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2201 ops = rtnl_link_ops_get(kind); 2202 } 2203 2204 return ops; 2205 } 2206 2207 static bool link_master_filtered(struct net_device *dev, int master_idx) 2208 { 2209 struct net_device *master; 2210 2211 if (!master_idx) 2212 return false; 2213 2214 master = netdev_master_upper_dev_get(dev); 2215 2216 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2217 * another invalid value for ifindex to denote "no master". 2218 */ 2219 if (master_idx == -1) 2220 return !!master; 2221 2222 if (!master || master->ifindex != master_idx) 2223 return true; 2224 2225 return false; 2226 } 2227 2228 static bool link_kind_filtered(const struct net_device *dev, 2229 const struct rtnl_link_ops *kind_ops) 2230 { 2231 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2232 return true; 2233 2234 return false; 2235 } 2236 2237 static bool link_dump_filtered(struct net_device *dev, 2238 int master_idx, 2239 const struct rtnl_link_ops *kind_ops) 2240 { 2241 if (link_master_filtered(dev, master_idx) || 2242 link_kind_filtered(dev, kind_ops)) 2243 return true; 2244 2245 return false; 2246 } 2247 2248 /** 2249 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 2250 * @sk: netlink socket 2251 * @netnsid: network namespace identifier 2252 * 2253 * Returns the network namespace identified by netnsid on success or an error 2254 * pointer on failure. 2255 */ 2256 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2257 { 2258 struct net *net; 2259 2260 net = get_net_ns_by_id(sock_net(sk), netnsid); 2261 if (!net) 2262 return ERR_PTR(-EINVAL); 2263 2264 /* For now, the caller is required to have CAP_NET_ADMIN in 2265 * the user namespace owning the target net ns. 
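 * For example, the RTM_GETLINK and RTM_DELLINK handlers below resolve an
 * IFLA_TARGET_NETNSID attribute through this helper before looking up
 * devices in the target namespace.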
2266 */ 2267 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2268 put_net(net); 2269 return ERR_PTR(-EACCES); 2270 } 2271 return net; 2272 } 2273 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2274 2275 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2276 bool strict_check, struct nlattr **tb, 2277 struct netlink_ext_ack *extack) 2278 { 2279 int hdrlen; 2280 2281 if (strict_check) { 2282 struct ifinfomsg *ifm; 2283 2284 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2285 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2286 return -EINVAL; 2287 } 2288 2289 ifm = nlmsg_data(nlh); 2290 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2291 ifm->ifi_change) { 2292 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2293 return -EINVAL; 2294 } 2295 if (ifm->ifi_index) { 2296 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2297 return -EINVAL; 2298 } 2299 2300 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2301 IFLA_MAX, ifla_policy, 2302 extack); 2303 } 2304 2305 /* A hack to preserve kernel<->userspace interface. 2306 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2307 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2308 * what iproute2 < v3.9.0 used. 2309 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2310 * attribute, its netlink message is shorter than struct ifinfomsg. 2311 */ 2312 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 2313 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2314 2315 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2316 extack); 2317 } 2318 2319 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2320 { 2321 const struct rtnl_link_ops *kind_ops = NULL; 2322 struct netlink_ext_ack *extack = cb->extack; 2323 const struct nlmsghdr *nlh = cb->nlh; 2324 struct net *net = sock_net(skb->sk); 2325 unsigned int flags = NLM_F_MULTI; 2326 struct nlattr *tb[IFLA_MAX+1]; 2327 struct { 2328 unsigned long ifindex; 2329 } *ctx = (void *)cb->ctx; 2330 struct net *tgt_net = net; 2331 u32 ext_filter_mask = 0; 2332 struct net_device *dev; 2333 int master_idx = 0; 2334 int netnsid = -1; 2335 int err, i; 2336 2337 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2338 if (err < 0) { 2339 if (cb->strict_check) 2340 return err; 2341 2342 goto walk_entries; 2343 } 2344 2345 for (i = 0; i <= IFLA_MAX; ++i) { 2346 if (!tb[i]) 2347 continue; 2348 2349 /* new attributes should only be added with strict checking */ 2350 switch (i) { 2351 case IFLA_TARGET_NETNSID: 2352 netnsid = nla_get_s32(tb[i]); 2353 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2354 if (IS_ERR(tgt_net)) { 2355 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2356 return PTR_ERR(tgt_net); 2357 } 2358 break; 2359 case IFLA_EXT_MASK: 2360 ext_filter_mask = nla_get_u32(tb[i]); 2361 break; 2362 case IFLA_MASTER: 2363 master_idx = nla_get_u32(tb[i]); 2364 break; 2365 case IFLA_LINKINFO: 2366 kind_ops = linkinfo_to_kind_ops(tb[i]); 2367 break; 2368 default: 2369 if (cb->strict_check) { 2370 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2371 return -EINVAL; 2372 } 2373 } 2374 } 2375 2376 if (master_idx || kind_ops) 2377 flags |= NLM_F_DUMP_FILTERED; 2378 2379 walk_entries: 2380 err = 0; 2381 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { 2382 if (link_dump_filtered(dev, master_idx, kind_ops)) 2383 continue; 2384 err = rtnl_fill_ifinfo(skb, 
dev, net, RTM_NEWLINK, 2385 NETLINK_CB(cb->skb).portid, 2386 nlh->nlmsg_seq, 0, flags, 2387 ext_filter_mask, 0, NULL, 0, 2388 netnsid, GFP_KERNEL); 2389 if (err < 0) 2390 break; 2391 } 2392 cb->seq = tgt_net->dev_base_seq; 2393 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2394 if (netnsid >= 0) 2395 put_net(tgt_net); 2396 2397 return err; 2398 } 2399 2400 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 2401 struct netlink_ext_ack *exterr) 2402 { 2403 const struct ifinfomsg *ifmp; 2404 const struct nlattr *attrs; 2405 size_t len; 2406 2407 ifmp = nla_data(nla_peer); 2408 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); 2409 len = nla_len(nla_peer) - sizeof(struct ifinfomsg); 2410 2411 if (ifmp->ifi_index < 0) { 2412 NL_SET_ERR_MSG_ATTR(exterr, nla_peer, 2413 "ifindex can't be negative"); 2414 return -EINVAL; 2415 } 2416 2417 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, 2418 exterr); 2419 } 2420 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); 2421 2422 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2423 { 2424 struct net *net; 2425 /* Examine the link attributes and figure out which 2426 * network namespace we are talking about. 2427 */ 2428 if (tb[IFLA_NET_NS_PID]) 2429 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2430 else if (tb[IFLA_NET_NS_FD]) 2431 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2432 else 2433 net = get_net(src_net); 2434 return net; 2435 } 2436 EXPORT_SYMBOL(rtnl_link_get_net); 2437 2438 /* Figure out which network namespace we are talking about by 2439 * examining the link attributes in the following order: 2440 * 2441 * 1. IFLA_NET_NS_PID 2442 * 2. IFLA_NET_NS_FD 2443 * 3. IFLA_TARGET_NETNSID 2444 */ 2445 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2446 struct nlattr *tb[]) 2447 { 2448 struct net *net; 2449 2450 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2451 return rtnl_link_get_net(src_net, tb); 2452 2453 if (!tb[IFLA_TARGET_NETNSID]) 2454 return get_net(src_net); 2455 2456 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2457 if (!net) 2458 return ERR_PTR(-EINVAL); 2459 2460 return net; 2461 } 2462 2463 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2464 struct net *src_net, 2465 struct nlattr *tb[], int cap) 2466 { 2467 struct net *net; 2468 2469 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2470 if (IS_ERR(net)) 2471 return net; 2472 2473 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2474 put_net(net); 2475 return ERR_PTR(-EPERM); 2476 } 2477 2478 return net; 2479 } 2480 2481 /* Verify that rtnetlink requests do not pass additional properties 2482 * potentially referring to different network namespaces. 
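 * For example, a request carrying both IFLA_NET_NS_PID and IFLA_NET_NS_FD
 * is ambiguous about the intended namespace and is rejected below with
 * "multiple netns identifying attributes specified".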
2483 */ 2484 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2485 struct netlink_ext_ack *extack, 2486 bool netns_id_only) 2487 { 2488 2489 if (netns_id_only) { 2490 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2491 return 0; 2492 2493 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2494 return -EOPNOTSUPP; 2495 } 2496 2497 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2498 goto invalid_attr; 2499 2500 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2501 goto invalid_attr; 2502 2503 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2504 goto invalid_attr; 2505 2506 return 0; 2507 2508 invalid_attr: 2509 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2510 return -EINVAL; 2511 } 2512 2513 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2514 int max_tx_rate) 2515 { 2516 const struct net_device_ops *ops = dev->netdev_ops; 2517 2518 if (!ops->ndo_set_vf_rate) 2519 return -EOPNOTSUPP; 2520 if (max_tx_rate && max_tx_rate < min_tx_rate) 2521 return -EINVAL; 2522 2523 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2524 } 2525 2526 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2527 struct netlink_ext_ack *extack) 2528 { 2529 if (tb[IFLA_ADDRESS] && 2530 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2531 return -EINVAL; 2532 2533 if (tb[IFLA_BROADCAST] && 2534 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2535 return -EINVAL; 2536 2537 if (tb[IFLA_GSO_MAX_SIZE] && 2538 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { 2539 NL_SET_ERR_MSG(extack, "too big gso_max_size"); 2540 return -EINVAL; 2541 } 2542 2543 if (tb[IFLA_GSO_MAX_SEGS] && 2544 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || 2545 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { 2546 NL_SET_ERR_MSG(extack, "too big gso_max_segs"); 2547 return -EINVAL; 2548 } 2549 2550 if (tb[IFLA_GRO_MAX_SIZE] && 2551 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { 2552 NL_SET_ERR_MSG(extack, "too big gro_max_size"); 2553 return -EINVAL; 2554 } 2555 2556 if (tb[IFLA_GSO_IPV4_MAX_SIZE] && 2557 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { 2558 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); 2559 return -EINVAL; 2560 } 2561 2562 if (tb[IFLA_GRO_IPV4_MAX_SIZE] && 2563 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { 2564 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); 2565 return -EINVAL; 2566 } 2567 2568 if (tb[IFLA_AF_SPEC]) { 2569 struct nlattr *af; 2570 int rem, err; 2571 2572 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2573 const struct rtnl_af_ops *af_ops; 2574 2575 af_ops = rtnl_af_lookup(nla_type(af)); 2576 if (!af_ops) 2577 return -EAFNOSUPPORT; 2578 2579 if (!af_ops->set_link_af) 2580 return -EOPNOTSUPP; 2581 2582 if (af_ops->validate_link_af) { 2583 err = af_ops->validate_link_af(dev, af, extack); 2584 if (err < 0) 2585 return err; 2586 } 2587 } 2588 } 2589 2590 return 0; 2591 } 2592 2593 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2594 int guid_type) 2595 { 2596 const struct net_device_ops *ops = dev->netdev_ops; 2597 2598 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2599 } 2600 2601 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2602 { 2603 if (dev->type != ARPHRD_INFINIBAND) 2604 return -EOPNOTSUPP; 2605 2606 return handle_infiniband_guid(dev, ivt, guid_type); 2607 
} 2608 2609 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2610 { 2611 const struct net_device_ops *ops = dev->netdev_ops; 2612 int err = -EINVAL; 2613 2614 if (tb[IFLA_VF_MAC]) { 2615 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2616 2617 if (ivm->vf >= INT_MAX) 2618 return -EINVAL; 2619 err = -EOPNOTSUPP; 2620 if (ops->ndo_set_vf_mac) 2621 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2622 ivm->mac); 2623 if (err < 0) 2624 return err; 2625 } 2626 2627 if (tb[IFLA_VF_VLAN]) { 2628 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2629 2630 if (ivv->vf >= INT_MAX) 2631 return -EINVAL; 2632 err = -EOPNOTSUPP; 2633 if (ops->ndo_set_vf_vlan) 2634 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2635 ivv->qos, 2636 htons(ETH_P_8021Q)); 2637 if (err < 0) 2638 return err; 2639 } 2640 2641 if (tb[IFLA_VF_VLAN_LIST]) { 2642 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2643 struct nlattr *attr; 2644 int rem, len = 0; 2645 2646 err = -EOPNOTSUPP; 2647 if (!ops->ndo_set_vf_vlan) 2648 return err; 2649 2650 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2651 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2652 nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) { 2653 return -EINVAL; 2654 } 2655 if (len >= MAX_VLAN_LIST_LEN) 2656 return -EOPNOTSUPP; 2657 ivvl[len] = nla_data(attr); 2658 2659 len++; 2660 } 2661 if (len == 0) 2662 return -EINVAL; 2663 2664 if (ivvl[0]->vf >= INT_MAX) 2665 return -EINVAL; 2666 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2667 ivvl[0]->qos, ivvl[0]->vlan_proto); 2668 if (err < 0) 2669 return err; 2670 } 2671 2672 if (tb[IFLA_VF_TX_RATE]) { 2673 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2674 struct ifla_vf_info ivf; 2675 2676 if (ivt->vf >= INT_MAX) 2677 return -EINVAL; 2678 err = -EOPNOTSUPP; 2679 if (ops->ndo_get_vf_config) 2680 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2681 if (err < 0) 2682 return err; 2683 2684 err = rtnl_set_vf_rate(dev, ivt->vf, 2685 ivf.min_tx_rate, ivt->rate); 2686 if (err < 0) 2687 return err; 2688 } 2689 2690 if (tb[IFLA_VF_RATE]) { 2691 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2692 2693 if (ivt->vf >= INT_MAX) 2694 return -EINVAL; 2695 2696 err = rtnl_set_vf_rate(dev, ivt->vf, 2697 ivt->min_tx_rate, ivt->max_tx_rate); 2698 if (err < 0) 2699 return err; 2700 } 2701 2702 if (tb[IFLA_VF_SPOOFCHK]) { 2703 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2704 2705 if (ivs->vf >= INT_MAX) 2706 return -EINVAL; 2707 err = -EOPNOTSUPP; 2708 if (ops->ndo_set_vf_spoofchk) 2709 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2710 ivs->setting); 2711 if (err < 0) 2712 return err; 2713 } 2714 2715 if (tb[IFLA_VF_LINK_STATE]) { 2716 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2717 2718 if (ivl->vf >= INT_MAX) 2719 return -EINVAL; 2720 err = -EOPNOTSUPP; 2721 if (ops->ndo_set_vf_link_state) 2722 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2723 ivl->link_state); 2724 if (err < 0) 2725 return err; 2726 } 2727 2728 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2729 struct ifla_vf_rss_query_en *ivrssq_en; 2730 2731 err = -EOPNOTSUPP; 2732 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2733 if (ivrssq_en->vf >= INT_MAX) 2734 return -EINVAL; 2735 if (ops->ndo_set_vf_rss_query_en) 2736 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2737 ivrssq_en->setting); 2738 if (err < 0) 2739 return err; 2740 } 2741 2742 if (tb[IFLA_VF_TRUST]) { 2743 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2744 2745 if (ivt->vf >= INT_MAX) 2746 return 
-EINVAL; 2747 err = -EOPNOTSUPP; 2748 if (ops->ndo_set_vf_trust) 2749 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2750 if (err < 0) 2751 return err; 2752 } 2753 2754 if (tb[IFLA_VF_IB_NODE_GUID]) { 2755 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2756 2757 if (ivt->vf >= INT_MAX) 2758 return -EINVAL; 2759 if (!ops->ndo_set_vf_guid) 2760 return -EOPNOTSUPP; 2761 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2762 } 2763 2764 if (tb[IFLA_VF_IB_PORT_GUID]) { 2765 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2766 2767 if (ivt->vf >= INT_MAX) 2768 return -EINVAL; 2769 if (!ops->ndo_set_vf_guid) 2770 return -EOPNOTSUPP; 2771 2772 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2773 } 2774 2775 return err; 2776 } 2777 2778 static int do_set_master(struct net_device *dev, int ifindex, 2779 struct netlink_ext_ack *extack) 2780 { 2781 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2782 const struct net_device_ops *ops; 2783 int err; 2784 2785 if (upper_dev) { 2786 if (upper_dev->ifindex == ifindex) 2787 return 0; 2788 ops = upper_dev->netdev_ops; 2789 if (ops->ndo_del_slave) { 2790 err = ops->ndo_del_slave(upper_dev, dev); 2791 if (err) 2792 return err; 2793 } else { 2794 return -EOPNOTSUPP; 2795 } 2796 } 2797 2798 if (ifindex) { 2799 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2800 if (!upper_dev) 2801 return -EINVAL; 2802 ops = upper_dev->netdev_ops; 2803 if (ops->ndo_add_slave) { 2804 err = ops->ndo_add_slave(upper_dev, dev, extack); 2805 if (err) 2806 return err; 2807 } else { 2808 return -EOPNOTSUPP; 2809 } 2810 } 2811 return 0; 2812 } 2813 2814 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2815 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2816 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2817 }; 2818 2819 static int do_set_proto_down(struct net_device *dev, 2820 struct nlattr *nl_proto_down, 2821 struct nlattr *nl_proto_down_reason, 2822 struct netlink_ext_ack *extack) 2823 { 2824 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2825 unsigned long mask = 0; 2826 u32 value; 2827 bool proto_down; 2828 int err; 2829 2830 if (!dev->change_proto_down) { 2831 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2832 return -EOPNOTSUPP; 2833 } 2834 2835 if (nl_proto_down_reason) { 2836 err = nla_parse_nested_deprecated(pdreason, 2837 IFLA_PROTO_DOWN_REASON_MAX, 2838 nl_proto_down_reason, 2839 ifla_proto_down_reason_policy, 2840 NULL); 2841 if (err < 0) 2842 return err; 2843 2844 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2845 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2846 return -EINVAL; 2847 } 2848 2849 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2850 2851 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2852 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2853 2854 dev_change_proto_down_reason(dev, mask, value); 2855 } 2856 2857 if (nl_proto_down) { 2858 proto_down = nla_get_u8(nl_proto_down); 2859 2860 /* Don't turn off protodown if there are active reasons */ 2861 if (!proto_down && dev->proto_down_reason) { 2862 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2863 return -EBUSY; 2864 } 2865 err = dev_change_proto_down(dev, 2866 proto_down); 2867 if (err) 2868 return err; 2869 } 2870 2871 return 0; 2872 } 2873 2874 #define DO_SETLINK_MODIFIED 0x01 2875 /* notify flag means notify + modified. 
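 * DO_SETLINK_NOTIFY (0x03) includes the DO_SETLINK_MODIFIED (0x01) bit, so
 * "(status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY" in do_setlink() is
 * true only for changes that also warrant a netdev_state_change().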
*/ 2876 #define DO_SETLINK_NOTIFY 0x03 2877 static int do_setlink(const struct sk_buff *skb, 2878 struct net_device *dev, struct ifinfomsg *ifm, 2879 struct netlink_ext_ack *extack, 2880 struct nlattr **tb, int status) 2881 { 2882 const struct net_device_ops *ops = dev->netdev_ops; 2883 char ifname[IFNAMSIZ]; 2884 int err; 2885 2886 if (tb[IFLA_IFNAME]) 2887 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2888 else 2889 ifname[0] = '\0'; 2890 2891 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2892 const char *pat = ifname[0] ? ifname : NULL; 2893 struct net *net; 2894 int new_ifindex; 2895 2896 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2897 tb, CAP_NET_ADMIN); 2898 if (IS_ERR(net)) { 2899 err = PTR_ERR(net); 2900 goto errout; 2901 } 2902 2903 if (tb[IFLA_NEW_IFINDEX]) 2904 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2905 else 2906 new_ifindex = 0; 2907 2908 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2909 put_net(net); 2910 if (err) 2911 goto errout; 2912 status |= DO_SETLINK_MODIFIED; 2913 } 2914 2915 if (tb[IFLA_MAP]) { 2916 struct rtnl_link_ifmap *u_map; 2917 struct ifmap k_map; 2918 2919 if (!ops->ndo_set_config) { 2920 err = -EOPNOTSUPP; 2921 goto errout; 2922 } 2923 2924 if (!netif_device_present(dev)) { 2925 err = -ENODEV; 2926 goto errout; 2927 } 2928 2929 u_map = nla_data(tb[IFLA_MAP]); 2930 k_map.mem_start = (unsigned long) u_map->mem_start; 2931 k_map.mem_end = (unsigned long) u_map->mem_end; 2932 k_map.base_addr = (unsigned short) u_map->base_addr; 2933 k_map.irq = (unsigned char) u_map->irq; 2934 k_map.dma = (unsigned char) u_map->dma; 2935 k_map.port = (unsigned char) u_map->port; 2936 2937 err = ops->ndo_set_config(dev, &k_map); 2938 if (err < 0) 2939 goto errout; 2940 2941 status |= DO_SETLINK_NOTIFY; 2942 } 2943 2944 if (tb[IFLA_ADDRESS]) { 2945 struct sockaddr *sa; 2946 int len; 2947 2948 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2949 sizeof(*sa)); 2950 sa = kmalloc(len, GFP_KERNEL); 2951 if (!sa) { 2952 err = -ENOMEM; 2953 goto errout; 2954 } 2955 sa->sa_family = dev->type; 2956 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2957 dev->addr_len); 2958 err = dev_set_mac_address_user(dev, sa, extack); 2959 kfree(sa); 2960 if (err) 2961 goto errout; 2962 status |= DO_SETLINK_MODIFIED; 2963 } 2964 2965 if (tb[IFLA_MTU]) { 2966 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2967 if (err < 0) 2968 goto errout; 2969 status |= DO_SETLINK_MODIFIED; 2970 } 2971 2972 if (tb[IFLA_GROUP]) { 2973 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2974 status |= DO_SETLINK_NOTIFY; 2975 } 2976 2977 /* 2978 * Interface selected by interface index but interface 2979 * name provided implies that a name change has been 2980 * requested. 
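 * For example, with iproute2 "ip link set dev eth0 name lan0" typically
 * resolves eth0 to its ifindex and sends that index together with
 * IFLA_IFNAME "lan0".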
2981 */ 2982 if (ifm->ifi_index > 0 && ifname[0]) { 2983 err = dev_change_name(dev, ifname); 2984 if (err < 0) 2985 goto errout; 2986 status |= DO_SETLINK_MODIFIED; 2987 } 2988 2989 if (tb[IFLA_IFALIAS]) { 2990 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2991 nla_len(tb[IFLA_IFALIAS])); 2992 if (err < 0) 2993 goto errout; 2994 status |= DO_SETLINK_NOTIFY; 2995 } 2996 2997 if (tb[IFLA_BROADCAST]) { 2998 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2999 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 3000 } 3001 3002 if (ifm->ifi_flags || ifm->ifi_change) { 3003 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3004 extack); 3005 if (err < 0) 3006 goto errout; 3007 } 3008 3009 if (tb[IFLA_MASTER]) { 3010 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3011 if (err) 3012 goto errout; 3013 status |= DO_SETLINK_MODIFIED; 3014 } 3015 3016 if (tb[IFLA_CARRIER]) { 3017 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 3018 if (err) 3019 goto errout; 3020 status |= DO_SETLINK_MODIFIED; 3021 } 3022 3023 if (tb[IFLA_TXQLEN]) { 3024 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 3025 3026 err = dev_change_tx_queue_len(dev, value); 3027 if (err) 3028 goto errout; 3029 status |= DO_SETLINK_MODIFIED; 3030 } 3031 3032 if (tb[IFLA_GSO_MAX_SIZE]) { 3033 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 3034 3035 if (dev->gso_max_size ^ max_size) { 3036 netif_set_gso_max_size(dev, max_size); 3037 status |= DO_SETLINK_MODIFIED; 3038 } 3039 } 3040 3041 if (tb[IFLA_GSO_MAX_SEGS]) { 3042 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 3043 3044 if (dev->gso_max_segs ^ max_segs) { 3045 netif_set_gso_max_segs(dev, max_segs); 3046 status |= DO_SETLINK_MODIFIED; 3047 } 3048 } 3049 3050 if (tb[IFLA_GRO_MAX_SIZE]) { 3051 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 3052 3053 if (dev->gro_max_size ^ gro_max_size) { 3054 netif_set_gro_max_size(dev, gro_max_size); 3055 status |= DO_SETLINK_MODIFIED; 3056 } 3057 } 3058 3059 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { 3060 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); 3061 3062 if (dev->gso_ipv4_max_size ^ max_size) { 3063 netif_set_gso_ipv4_max_size(dev, max_size); 3064 status |= DO_SETLINK_MODIFIED; 3065 } 3066 } 3067 3068 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) { 3069 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]); 3070 3071 if (dev->gro_ipv4_max_size ^ gro_max_size) { 3072 netif_set_gro_ipv4_max_size(dev, gro_max_size); 3073 status |= DO_SETLINK_MODIFIED; 3074 } 3075 } 3076 3077 if (tb[IFLA_OPERSTATE]) 3078 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3079 3080 if (tb[IFLA_LINKMODE]) { 3081 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 3082 3083 if (dev->link_mode ^ value) 3084 status |= DO_SETLINK_NOTIFY; 3085 WRITE_ONCE(dev->link_mode, value); 3086 } 3087 3088 if (tb[IFLA_VFINFO_LIST]) { 3089 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 3090 struct nlattr *attr; 3091 int rem; 3092 3093 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 3094 if (nla_type(attr) != IFLA_VF_INFO || 3095 nla_len(attr) < NLA_HDRLEN) { 3096 err = -EINVAL; 3097 goto errout; 3098 } 3099 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 3100 attr, 3101 ifla_vf_policy, 3102 NULL); 3103 if (err < 0) 3104 goto errout; 3105 err = do_setvfinfo(dev, vfinfo); 3106 if (err < 0) 3107 goto errout; 3108 status |= DO_SETLINK_NOTIFY; 3109 } 3110 } 3111 err = 0; 3112 3113 if (tb[IFLA_VF_PORTS]) { 3114 struct nlattr *port[IFLA_PORT_MAX+1]; 3115 struct nlattr *attr; 3116 int vf; 3117 int rem; 3118 3119 
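		/* Each IFLA_VF_PORT nest below must identify a VF via
		 * IFLA_PORT_VF; the parsed per-port attributes are passed
		 * to the driver's ndo_set_vf_port() unmodified.
		 */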
err = -EOPNOTSUPP; 3120 if (!ops->ndo_set_vf_port) 3121 goto errout; 3122 3123 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 3124 if (nla_type(attr) != IFLA_VF_PORT || 3125 nla_len(attr) < NLA_HDRLEN) { 3126 err = -EINVAL; 3127 goto errout; 3128 } 3129 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3130 attr, 3131 ifla_port_policy, 3132 NULL); 3133 if (err < 0) 3134 goto errout; 3135 if (!port[IFLA_PORT_VF]) { 3136 err = -EOPNOTSUPP; 3137 goto errout; 3138 } 3139 vf = nla_get_u32(port[IFLA_PORT_VF]); 3140 err = ops->ndo_set_vf_port(dev, vf, port); 3141 if (err < 0) 3142 goto errout; 3143 status |= DO_SETLINK_NOTIFY; 3144 } 3145 } 3146 err = 0; 3147 3148 if (tb[IFLA_PORT_SELF]) { 3149 struct nlattr *port[IFLA_PORT_MAX+1]; 3150 3151 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 3152 tb[IFLA_PORT_SELF], 3153 ifla_port_policy, NULL); 3154 if (err < 0) 3155 goto errout; 3156 3157 err = -EOPNOTSUPP; 3158 if (ops->ndo_set_vf_port) 3159 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 3160 if (err < 0) 3161 goto errout; 3162 status |= DO_SETLINK_NOTIFY; 3163 } 3164 3165 if (tb[IFLA_AF_SPEC]) { 3166 struct nlattr *af; 3167 int rem; 3168 3169 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 3170 const struct rtnl_af_ops *af_ops; 3171 3172 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 3173 3174 err = af_ops->set_link_af(dev, af, extack); 3175 if (err < 0) 3176 goto errout; 3177 3178 status |= DO_SETLINK_NOTIFY; 3179 } 3180 } 3181 err = 0; 3182 3183 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 3184 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 3185 tb[IFLA_PROTO_DOWN_REASON], extack); 3186 if (err) 3187 goto errout; 3188 status |= DO_SETLINK_NOTIFY; 3189 } 3190 3191 if (tb[IFLA_XDP]) { 3192 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3193 u32 xdp_flags = 0; 3194 3195 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3196 tb[IFLA_XDP], 3197 ifla_xdp_policy, NULL); 3198 if (err < 0) 3199 goto errout; 3200 3201 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3202 err = -EINVAL; 3203 goto errout; 3204 } 3205 3206 if (xdp[IFLA_XDP_FLAGS]) { 3207 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3208 if (xdp_flags & ~XDP_FLAGS_MASK) { 3209 err = -EINVAL; 3210 goto errout; 3211 } 3212 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3213 err = -EINVAL; 3214 goto errout; 3215 } 3216 } 3217 3218 if (xdp[IFLA_XDP_FD]) { 3219 int expected_fd = -1; 3220 3221 if (xdp_flags & XDP_FLAGS_REPLACE) { 3222 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3223 err = -EINVAL; 3224 goto errout; 3225 } 3226 expected_fd = 3227 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3228 } 3229 3230 err = dev_change_xdp_fd(dev, extack, 3231 nla_get_s32(xdp[IFLA_XDP_FD]), 3232 expected_fd, 3233 xdp_flags); 3234 if (err) 3235 goto errout; 3236 status |= DO_SETLINK_NOTIFY; 3237 } 3238 } 3239 3240 errout: 3241 if (status & DO_SETLINK_MODIFIED) { 3242 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3243 netdev_state_change(dev); 3244 3245 if (err < 0) 3246 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3247 dev->name); 3248 } 3249 3250 return err; 3251 } 3252 3253 static struct net_device *rtnl_dev_get(struct net *net, 3254 struct nlattr *tb[]) 3255 { 3256 char ifname[ALTIFNAMSIZ]; 3257 3258 if (tb[IFLA_IFNAME]) 3259 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3260 else if (tb[IFLA_ALT_IFNAME]) 3261 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3262 else 3263 return NULL; 3264 3265 return __dev_get_by_name(net, ifname); 3266 } 3267 3268 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3269 struct netlink_ext_ack *extack) 3270 { 3271 struct net *net = sock_net(skb->sk); 3272 struct ifinfomsg *ifm; 3273 struct net_device *dev; 3274 int err; 3275 struct nlattr *tb[IFLA_MAX+1]; 3276 3277 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3278 ifla_policy, extack); 3279 if (err < 0) 3280 goto errout; 3281 3282 err = rtnl_ensure_unique_netns(tb, extack, false); 3283 if (err < 0) 3284 goto errout; 3285 3286 err = -EINVAL; 3287 ifm = nlmsg_data(nlh); 3288 if (ifm->ifi_index > 0) 3289 dev = __dev_get_by_index(net, ifm->ifi_index); 3290 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3291 dev = rtnl_dev_get(net, tb); 3292 else 3293 goto errout; 3294 3295 if (dev == NULL) { 3296 err = -ENODEV; 3297 goto errout; 3298 } 3299 3300 err = validate_linkmsg(dev, tb, extack); 3301 if (err < 0) 3302 goto errout; 3303 3304 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3305 errout: 3306 return err; 3307 } 3308 3309 static int rtnl_group_dellink(const struct net *net, int group) 3310 { 3311 struct net_device *dev, *aux; 3312 LIST_HEAD(list_kill); 3313 bool found = false; 3314 3315 if (!group) 3316 return -EPERM; 3317 3318 for_each_netdev(net, dev) { 3319 if (dev->group == group) { 3320 const struct rtnl_link_ops *ops; 3321 3322 found = true; 3323 ops = dev->rtnl_link_ops; 3324 if (!ops || !ops->dellink) 3325 return -EOPNOTSUPP; 3326 } 3327 } 3328 3329 if (!found) 3330 return -ENODEV; 3331 3332 for_each_netdev_safe(net, dev, aux) { 3333 if (dev->group == group) { 3334 const struct rtnl_link_ops *ops; 3335 3336 ops = dev->rtnl_link_ops; 3337 ops->dellink(dev, &list_kill); 3338 } 3339 } 3340 unregister_netdevice_many(&list_kill); 3341 3342 return 0; 3343 } 3344 3345 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3346 { 3347 const struct rtnl_link_ops *ops; 3348 LIST_HEAD(list_kill); 3349 3350 ops = dev->rtnl_link_ops; 3351 if (!ops || !ops->dellink) 3352 return -EOPNOTSUPP; 3353 3354 ops->dellink(dev, &list_kill); 3355 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3356 3357 return 0; 3358 } 3359 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3360 3361 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3362 struct netlink_ext_ack *extack) 3363 { 3364 struct net *net = sock_net(skb->sk); 3365 u32 portid = NETLINK_CB(skb).portid; 3366 struct net *tgt_net = net; 3367 struct net_device *dev = NULL; 3368 struct ifinfomsg *ifm; 3369 struct nlattr *tb[IFLA_MAX+1]; 3370 int err; 3371 int netnsid = -1; 3372 3373 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3374 ifla_policy, extack); 3375 if (err < 0) 3376 return err; 3377 3378 err = rtnl_ensure_unique_netns(tb, extack, true); 3379 if (err < 0) 3380 return err; 3381 3382 if (tb[IFLA_TARGET_NETNSID]) { 3383 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3384 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3385 if (IS_ERR(tgt_net)) 3386 return 
PTR_ERR(tgt_net); 3387 } 3388 3389 err = -EINVAL; 3390 ifm = nlmsg_data(nlh); 3391 if (ifm->ifi_index > 0) 3392 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3393 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3394 dev = rtnl_dev_get(tgt_net, tb); 3395 else if (tb[IFLA_GROUP]) 3396 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3397 else 3398 goto out; 3399 3400 if (!dev) { 3401 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3402 err = -ENODEV; 3403 3404 goto out; 3405 } 3406 3407 err = rtnl_delete_link(dev, portid, nlh); 3408 3409 out: 3410 if (netnsid >= 0) 3411 put_net(tgt_net); 3412 3413 return err; 3414 } 3415 3416 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3417 u32 portid, const struct nlmsghdr *nlh) 3418 { 3419 unsigned int old_flags; 3420 int err; 3421 3422 old_flags = dev->flags; 3423 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3424 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3425 NULL); 3426 if (err < 0) 3427 return err; 3428 } 3429 3430 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3431 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3432 } else { 3433 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3434 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3435 } 3436 return 0; 3437 } 3438 EXPORT_SYMBOL(rtnl_configure_link); 3439 3440 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3441 unsigned char name_assign_type, 3442 const struct rtnl_link_ops *ops, 3443 struct nlattr *tb[], 3444 struct netlink_ext_ack *extack) 3445 { 3446 struct net_device *dev; 3447 unsigned int num_tx_queues = 1; 3448 unsigned int num_rx_queues = 1; 3449 int err; 3450 3451 if (tb[IFLA_NUM_TX_QUEUES]) 3452 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3453 else if (ops->get_num_tx_queues) 3454 num_tx_queues = ops->get_num_tx_queues(); 3455 3456 if (tb[IFLA_NUM_RX_QUEUES]) 3457 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3458 else if (ops->get_num_rx_queues) 3459 num_rx_queues = ops->get_num_rx_queues(); 3460 3461 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3462 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3463 return ERR_PTR(-EINVAL); 3464 } 3465 3466 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3467 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3468 return ERR_PTR(-EINVAL); 3469 } 3470 3471 if (ops->alloc) { 3472 dev = ops->alloc(tb, ifname, name_assign_type, 3473 num_tx_queues, num_rx_queues); 3474 if (IS_ERR(dev)) 3475 return dev; 3476 } else { 3477 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3478 name_assign_type, ops->setup, 3479 num_tx_queues, num_rx_queues); 3480 } 3481 3482 if (!dev) 3483 return ERR_PTR(-ENOMEM); 3484 3485 err = validate_linkmsg(dev, tb, extack); 3486 if (err < 0) { 3487 free_netdev(dev); 3488 return ERR_PTR(err); 3489 } 3490 3491 dev_net_set(dev, net); 3492 dev->rtnl_link_ops = ops; 3493 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3494 3495 if (tb[IFLA_MTU]) { 3496 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3497 3498 err = dev_validate_mtu(dev, mtu, extack); 3499 if (err) { 3500 free_netdev(dev); 3501 return ERR_PTR(err); 3502 } 3503 dev->mtu = mtu; 3504 } 3505 if (tb[IFLA_ADDRESS]) { 3506 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3507 nla_len(tb[IFLA_ADDRESS])); 3508 dev->addr_assign_type = NET_ADDR_SET; 3509 } 3510 if (tb[IFLA_BROADCAST]) 3511 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3512 nla_len(tb[IFLA_BROADCAST])); 3513 if (tb[IFLA_TXQLEN]) 
3514 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3515 if (tb[IFLA_OPERSTATE]) 3516 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3517 if (tb[IFLA_LINKMODE]) 3518 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3519 if (tb[IFLA_GROUP]) 3520 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3521 if (tb[IFLA_GSO_MAX_SIZE]) 3522 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3523 if (tb[IFLA_GSO_MAX_SEGS]) 3524 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3525 if (tb[IFLA_GRO_MAX_SIZE]) 3526 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3527 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) 3528 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE])); 3529 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) 3530 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE])); 3531 3532 return dev; 3533 } 3534 EXPORT_SYMBOL(rtnl_create_link); 3535 3536 static int rtnl_group_changelink(const struct sk_buff *skb, 3537 struct net *net, int group, 3538 struct ifinfomsg *ifm, 3539 struct netlink_ext_ack *extack, 3540 struct nlattr **tb) 3541 { 3542 struct net_device *dev, *aux; 3543 int err; 3544 3545 for_each_netdev_safe(net, dev, aux) { 3546 if (dev->group == group) { 3547 err = validate_linkmsg(dev, tb, extack); 3548 if (err < 0) 3549 return err; 3550 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3551 if (err < 0) 3552 return err; 3553 } 3554 } 3555 3556 return 0; 3557 } 3558 3559 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3560 const struct rtnl_link_ops *ops, 3561 const struct nlmsghdr *nlh, 3562 struct nlattr **tb, struct nlattr **data, 3563 struct netlink_ext_ack *extack) 3564 { 3565 unsigned char name_assign_type = NET_NAME_USER; 3566 struct net *net = sock_net(skb->sk); 3567 u32 portid = NETLINK_CB(skb).portid; 3568 struct net *dest_net, *link_net; 3569 struct net_device *dev; 3570 char ifname[IFNAMSIZ]; 3571 int err; 3572 3573 if (!ops->alloc && !ops->setup) 3574 return -EOPNOTSUPP; 3575 3576 if (tb[IFLA_IFNAME]) { 3577 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3578 } else { 3579 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3580 name_assign_type = NET_NAME_ENUM; 3581 } 3582 3583 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3584 if (IS_ERR(dest_net)) 3585 return PTR_ERR(dest_net); 3586 3587 if (tb[IFLA_LINK_NETNSID]) { 3588 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3589 3590 link_net = get_net_ns_by_id(dest_net, id); 3591 if (!link_net) { 3592 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3593 err = -EINVAL; 3594 goto out; 3595 } 3596 err = -EPERM; 3597 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3598 goto out; 3599 } else { 3600 link_net = NULL; 3601 } 3602 3603 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3604 name_assign_type, ops, tb, extack); 3605 if (IS_ERR(dev)) { 3606 err = PTR_ERR(dev); 3607 goto out; 3608 } 3609 3610 dev->ifindex = ifm->ifi_index; 3611 3612 if (ops->newlink) 3613 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3614 else 3615 err = register_netdevice(dev); 3616 if (err < 0) { 3617 free_netdev(dev); 3618 goto out; 3619 } 3620 3621 err = rtnl_configure_link(dev, ifm, portid, nlh); 3622 if (err < 0) 3623 goto out_unregister; 3624 if (link_net) { 3625 err = dev_change_net_namespace(dev, dest_net, ifname); 3626 if (err < 0) 3627 goto out_unregister; 3628 } 3629 if (tb[IFLA_MASTER]) { 3630 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3631 if (err) 3632 goto out_unregister; 3633 } 3634 out: 3635 if (link_net) 3636 put_net(link_net); 3637 put_net(dest_net); 3638 return err; 3639 out_unregister: 3640 if (ops->newlink) { 3641 LIST_HEAD(list_kill); 3642 3643 ops->dellink(dev, &list_kill); 3644 unregister_netdevice_many(&list_kill); 3645 } else { 3646 unregister_netdevice(dev); 3647 } 3648 goto out; 3649 } 3650 3651 struct rtnl_newlink_tbs { 3652 struct nlattr *tb[IFLA_MAX + 1]; 3653 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3654 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3655 }; 3656 3657 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3658 struct rtnl_newlink_tbs *tbs, 3659 struct netlink_ext_ack *extack) 3660 { 3661 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3662 struct nlattr ** const tb = tbs->tb; 3663 const struct rtnl_link_ops *m_ops; 3664 struct net_device *master_dev; 3665 struct net *net = sock_net(skb->sk); 3666 const struct rtnl_link_ops *ops; 3667 struct nlattr **slave_data; 3668 char kind[MODULE_NAME_LEN]; 3669 struct net_device *dev; 3670 struct ifinfomsg *ifm; 3671 struct nlattr **data; 3672 bool link_specified; 3673 int err; 3674 3675 #ifdef CONFIG_MODULES 3676 replay: 3677 #endif 3678 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3679 ifla_policy, extack); 3680 if (err < 0) 3681 return err; 3682 3683 err = rtnl_ensure_unique_netns(tb, extack, false); 3684 if (err < 0) 3685 return err; 3686 3687 ifm = nlmsg_data(nlh); 3688 if (ifm->ifi_index > 0) { 3689 link_specified = true; 3690 dev = __dev_get_by_index(net, ifm->ifi_index); 3691 } else if (ifm->ifi_index < 0) { 3692 NL_SET_ERR_MSG(extack, "ifindex can't be negative"); 3693 return -EINVAL; 3694 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3695 link_specified = true; 3696 dev = rtnl_dev_get(net, tb); 3697 } else { 3698 link_specified = false; 3699 dev = NULL; 3700 } 3701 3702 master_dev = NULL; 3703 m_ops = NULL; 3704 if (dev) { 3705 master_dev = netdev_master_upper_dev_get(dev); 3706 if (master_dev) 3707 m_ops = master_dev->rtnl_link_ops; 3708 } 3709 3710 if (tb[IFLA_LINKINFO]) { 3711 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3712 tb[IFLA_LINKINFO], 3713 ifla_info_policy, NULL); 3714 if (err < 0) 3715 return err; 3716 } else 3717 memset(linkinfo, 0, sizeof(linkinfo)); 3718 3719 if (linkinfo[IFLA_INFO_KIND]) { 3720 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3721 ops = rtnl_link_ops_get(kind); 3722 } else { 3723 kind[0] = '\0'; 3724 ops = NULL; 3725 } 3726 3727 data = NULL; 3728 if (ops) { 3729 if (ops->maxtype > RTNL_MAX_TYPE) 3730 return -EINVAL; 3731 3732 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3733 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3734 linkinfo[IFLA_INFO_DATA], 3735 ops->policy, extack); 3736 if (err < 0) 3737 return err; 3738 data = tbs->attr; 3739 } 3740 if (ops->validate) { 3741 err = ops->validate(tb, data, extack); 3742 if (err < 0) 3743 return err; 3744 } 3745 } 3746 3747 slave_data = NULL; 3748 if (m_ops) { 3749 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3750 
return -EINVAL; 3751 3752 if (m_ops->slave_maxtype && 3753 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3754 err = nla_parse_nested_deprecated(tbs->slave_attr, 3755 m_ops->slave_maxtype, 3756 linkinfo[IFLA_INFO_SLAVE_DATA], 3757 m_ops->slave_policy, 3758 extack); 3759 if (err < 0) 3760 return err; 3761 slave_data = tbs->slave_attr; 3762 } 3763 } 3764 3765 if (dev) { 3766 int status = 0; 3767 3768 if (nlh->nlmsg_flags & NLM_F_EXCL) 3769 return -EEXIST; 3770 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3771 return -EOPNOTSUPP; 3772 3773 err = validate_linkmsg(dev, tb, extack); 3774 if (err < 0) 3775 return err; 3776 3777 if (linkinfo[IFLA_INFO_DATA]) { 3778 if (!ops || ops != dev->rtnl_link_ops || 3779 !ops->changelink) 3780 return -EOPNOTSUPP; 3781 3782 err = ops->changelink(dev, tb, data, extack); 3783 if (err < 0) 3784 return err; 3785 status |= DO_SETLINK_NOTIFY; 3786 } 3787 3788 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3789 if (!m_ops || !m_ops->slave_changelink) 3790 return -EOPNOTSUPP; 3791 3792 err = m_ops->slave_changelink(master_dev, dev, tb, 3793 slave_data, extack); 3794 if (err < 0) 3795 return err; 3796 status |= DO_SETLINK_NOTIFY; 3797 } 3798 3799 return do_setlink(skb, dev, ifm, extack, tb, status); 3800 } 3801 3802 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3803 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3804 * or it's for a group 3805 */ 3806 if (link_specified) 3807 return -ENODEV; 3808 if (tb[IFLA_GROUP]) 3809 return rtnl_group_changelink(skb, net, 3810 nla_get_u32(tb[IFLA_GROUP]), 3811 ifm, extack, tb); 3812 return -ENODEV; 3813 } 3814 3815 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3816 return -EOPNOTSUPP; 3817 3818 if (!ops) { 3819 #ifdef CONFIG_MODULES 3820 if (kind[0]) { 3821 __rtnl_unlock(); 3822 request_module("rtnl-link-%s", kind); 3823 rtnl_lock(); 3824 ops = rtnl_link_ops_get(kind); 3825 if (ops) 3826 goto replay; 3827 } 3828 #endif 3829 NL_SET_ERR_MSG(extack, "Unknown device type"); 3830 return -EOPNOTSUPP; 3831 } 3832 3833 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3834 } 3835 3836 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3837 struct netlink_ext_ack *extack) 3838 { 3839 struct rtnl_newlink_tbs *tbs; 3840 int ret; 3841 3842 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3843 if (!tbs) 3844 return -ENOMEM; 3845 3846 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3847 kfree(tbs); 3848 return ret; 3849 } 3850 3851 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3852 const struct nlmsghdr *nlh, 3853 struct nlattr **tb, 3854 struct netlink_ext_ack *extack) 3855 { 3856 struct ifinfomsg *ifm; 3857 int i, err; 3858 3859 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3860 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3861 return -EINVAL; 3862 } 3863 3864 if (!netlink_strict_get_check(skb)) 3865 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3866 ifla_policy, extack); 3867 3868 ifm = nlmsg_data(nlh); 3869 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3870 ifm->ifi_change) { 3871 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3872 return -EINVAL; 3873 } 3874 3875 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3876 ifla_policy, extack); 3877 if (err) 3878 return err; 3879 3880 for (i = 0; i <= IFLA_MAX; i++) { 3881 if (!tb[i]) 3882 continue; 3883 3884 switch (i) { 3885 case IFLA_IFNAME: 3886 case IFLA_ALT_IFNAME: 3887 case IFLA_EXT_MASK: 3888 case IFLA_TARGET_NETNSID: 3889 break; 3890 default: 3891 NL_SET_ERR_MSG(extack, "Unsupported 
attribute in get link request"); 3892 return -EINVAL; 3893 } 3894 } 3895 3896 return 0; 3897 } 3898 3899 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3900 struct netlink_ext_ack *extack) 3901 { 3902 struct net *net = sock_net(skb->sk); 3903 struct net *tgt_net = net; 3904 struct ifinfomsg *ifm; 3905 struct nlattr *tb[IFLA_MAX+1]; 3906 struct net_device *dev = NULL; 3907 struct sk_buff *nskb; 3908 int netnsid = -1; 3909 int err; 3910 u32 ext_filter_mask = 0; 3911 3912 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3913 if (err < 0) 3914 return err; 3915 3916 err = rtnl_ensure_unique_netns(tb, extack, true); 3917 if (err < 0) 3918 return err; 3919 3920 if (tb[IFLA_TARGET_NETNSID]) { 3921 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3922 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3923 if (IS_ERR(tgt_net)) 3924 return PTR_ERR(tgt_net); 3925 } 3926 3927 if (tb[IFLA_EXT_MASK]) 3928 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3929 3930 err = -EINVAL; 3931 ifm = nlmsg_data(nlh); 3932 if (ifm->ifi_index > 0) 3933 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3934 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3935 dev = rtnl_dev_get(tgt_net, tb); 3936 else 3937 goto out; 3938 3939 err = -ENODEV; 3940 if (dev == NULL) 3941 goto out; 3942 3943 err = -ENOBUFS; 3944 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask)); 3945 if (nskb == NULL) 3946 goto out; 3947 3948 /* Synchronize the carrier state so we don't report a state 3949 * that we're not actually going to honour immediately; if 3950 * the driver just did a carrier off->on transition, we can 3951 * only TX if link watch work has run, but without this we'd 3952 * already report carrier on, even if it doesn't work yet. 3953 */ 3954 linkwatch_sync_dev(dev); 3955 3956 err = rtnl_fill_ifinfo(nskb, dev, net, 3957 RTM_NEWLINK, NETLINK_CB(skb).portid, 3958 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3959 0, NULL, 0, netnsid, GFP_KERNEL); 3960 if (err < 0) { 3961 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3962 WARN_ON(err == -EMSGSIZE); 3963 kfree_skb(nskb); 3964 } else 3965 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3966 out: 3967 if (netnsid >= 0) 3968 put_net(tgt_net); 3969 3970 return err; 3971 } 3972 3973 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3974 bool *changed, struct netlink_ext_ack *extack) 3975 { 3976 char *alt_ifname; 3977 size_t size; 3978 int err; 3979 3980 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3981 if (err) 3982 return err; 3983 3984 if (cmd == RTM_NEWLINKPROP) { 3985 size = rtnl_prop_list_size(dev); 3986 size += nla_total_size(ALTIFNAMSIZ); 3987 if (size >= U16_MAX) { 3988 NL_SET_ERR_MSG(extack, 3989 "effective property list too long"); 3990 return -EINVAL; 3991 } 3992 } 3993 3994 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3995 if (!alt_ifname) 3996 return -ENOMEM; 3997 3998 if (cmd == RTM_NEWLINKPROP) { 3999 err = netdev_name_node_alt_create(dev, alt_ifname); 4000 if (!err) 4001 alt_ifname = NULL; 4002 } else if (cmd == RTM_DELLINKPROP) { 4003 err = netdev_name_node_alt_destroy(dev, alt_ifname); 4004 } else { 4005 WARN_ON_ONCE(1); 4006 err = -EINVAL; 4007 } 4008 4009 kfree(alt_ifname); 4010 if (!err) 4011 *changed = true; 4012 return err; 4013 } 4014 4015 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 4016 struct netlink_ext_ack *extack) 4017 { 4018 struct net *net = sock_net(skb->sk); 4019 struct nlattr *tb[IFLA_MAX + 1]; 4020 struct net_device 
*dev; 4021 struct ifinfomsg *ifm; 4022 bool changed = false; 4023 struct nlattr *attr; 4024 int err, rem; 4025 4026 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 4027 if (err) 4028 return err; 4029 4030 err = rtnl_ensure_unique_netns(tb, extack, true); 4031 if (err) 4032 return err; 4033 4034 ifm = nlmsg_data(nlh); 4035 if (ifm->ifi_index > 0) 4036 dev = __dev_get_by_index(net, ifm->ifi_index); 4037 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 4038 dev = rtnl_dev_get(net, tb); 4039 else 4040 return -EINVAL; 4041 4042 if (!dev) 4043 return -ENODEV; 4044 4045 if (!tb[IFLA_PROP_LIST]) 4046 return 0; 4047 4048 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 4049 switch (nla_type(attr)) { 4050 case IFLA_ALT_IFNAME: 4051 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 4052 if (err) 4053 return err; 4054 break; 4055 } 4056 } 4057 4058 if (changed) 4059 netdev_state_change(dev); 4060 return 0; 4061 } 4062 4063 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 4064 struct netlink_ext_ack *extack) 4065 { 4066 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 4067 } 4068 4069 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 4070 struct netlink_ext_ack *extack) 4071 { 4072 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 4073 } 4074 4075 static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb, 4076 struct nlmsghdr *nlh) 4077 { 4078 struct net *net = sock_net(skb->sk); 4079 size_t min_ifinfo_dump_size = 0; 4080 u32 ext_filter_mask = 0; 4081 struct net_device *dev; 4082 struct nlattr *nla; 4083 int hdrlen, rem; 4084 4085 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 4086 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 4087 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 4088 4089 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) 4090 return NLMSG_GOODSIZE; 4091 4092 nla_for_each_attr_type(nla, IFLA_EXT_MASK, 4093 nlmsg_attrdata(nlh, hdrlen), 4094 nlmsg_attrlen(nlh, hdrlen), rem) { 4095 if (nla_len(nla) == sizeof(u32)) 4096 ext_filter_mask = nla_get_u32(nla); 4097 } 4098 4099 if (!ext_filter_mask) 4100 return NLMSG_GOODSIZE; 4101 /* 4102 * traverse the list of net devices and compute the minimum 4103 * buffer size based upon the filter mask. 
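 * "Minimum" here means the smallest buffer that still fits the largest
 * single link message, hence the max() over all devices below.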
4104 */ 4105 rcu_read_lock(); 4106 for_each_netdev_rcu(net, dev) { 4107 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 4108 if_nlmsg_size(dev, ext_filter_mask)); 4109 } 4110 rcu_read_unlock(); 4111 4112 return nlmsg_total_size(min_ifinfo_dump_size); 4113 } 4114 4115 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 4116 { 4117 int idx; 4118 int s_idx = cb->family; 4119 int type = cb->nlh->nlmsg_type - RTM_BASE; 4120 int ret = 0; 4121 4122 if (s_idx == 0) 4123 s_idx = 1; 4124 4125 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 4126 struct rtnl_link __rcu **tab; 4127 struct rtnl_link *link; 4128 rtnl_dumpit_func dumpit; 4129 4130 if (idx < s_idx || idx == PF_PACKET) 4131 continue; 4132 4133 if (type < 0 || type >= RTM_NR_MSGTYPES) 4134 continue; 4135 4136 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 4137 if (!tab) 4138 continue; 4139 4140 link = rcu_dereference_rtnl(tab[type]); 4141 if (!link) 4142 continue; 4143 4144 dumpit = link->dumpit; 4145 if (!dumpit) 4146 continue; 4147 4148 if (idx > s_idx) { 4149 memset(&cb->args[0], 0, sizeof(cb->args)); 4150 cb->prev_seq = 0; 4151 cb->seq = 0; 4152 } 4153 ret = dumpit(skb, cb); 4154 if (ret) 4155 break; 4156 } 4157 cb->family = idx; 4158 4159 return skb->len ? : ret; 4160 } 4161 4162 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 4163 unsigned int change, 4164 u32 event, gfp_t flags, int *new_nsid, 4165 int new_ifindex, u32 portid, 4166 const struct nlmsghdr *nlh) 4167 { 4168 struct net *net = dev_net(dev); 4169 struct sk_buff *skb; 4170 int err = -ENOBUFS; 4171 u32 seq = 0; 4172 4173 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 4174 if (skb == NULL) 4175 goto errout; 4176 4177 if (nlmsg_report(nlh)) 4178 seq = nlmsg_seq(nlh); 4179 else 4180 portid = 0; 4181 4182 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 4183 type, portid, seq, change, 0, 0, event, 4184 new_nsid, new_ifindex, -1, flags); 4185 if (err < 0) { 4186 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 4187 WARN_ON(err == -EMSGSIZE); 4188 kfree_skb(skb); 4189 goto errout; 4190 } 4191 return skb; 4192 errout: 4193 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 4194 return NULL; 4195 } 4196 4197 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 4198 u32 portid, const struct nlmsghdr *nlh) 4199 { 4200 struct net *net = dev_net(dev); 4201 4202 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 4203 } 4204 4205 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 4206 unsigned int change, u32 event, 4207 gfp_t flags, int *new_nsid, int new_ifindex, 4208 u32 portid, const struct nlmsghdr *nlh) 4209 { 4210 struct sk_buff *skb; 4211 4212 if (dev->reg_state != NETREG_REGISTERED) 4213 return; 4214 4215 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 4216 new_ifindex, portid, nlh); 4217 if (skb) 4218 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 4219 } 4220 4221 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 4222 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 4223 { 4224 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4225 NULL, 0, portid, nlh); 4226 } 4227 4228 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4229 gfp_t flags, int *new_nsid, int new_ifindex) 4230 { 4231 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4232 new_nsid, new_ifindex, 0, NULL); 4233 } 4234 4235 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4236 struct net_device *dev, 4237 u8 *addr, u16 
vid, u32 pid, u32 seq, 4238 int type, unsigned int flags, 4239 int nlflags, u16 ndm_state) 4240 { 4241 struct nlmsghdr *nlh; 4242 struct ndmsg *ndm; 4243 4244 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4245 if (!nlh) 4246 return -EMSGSIZE; 4247 4248 ndm = nlmsg_data(nlh); 4249 ndm->ndm_family = AF_BRIDGE; 4250 ndm->ndm_pad1 = 0; 4251 ndm->ndm_pad2 = 0; 4252 ndm->ndm_flags = flags; 4253 ndm->ndm_type = 0; 4254 ndm->ndm_ifindex = dev->ifindex; 4255 ndm->ndm_state = ndm_state; 4256 4257 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) 4258 goto nla_put_failure; 4259 if (vid) 4260 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4261 goto nla_put_failure; 4262 4263 nlmsg_end(skb, nlh); 4264 return 0; 4265 4266 nla_put_failure: 4267 nlmsg_cancel(skb, nlh); 4268 return -EMSGSIZE; 4269 } 4270 4271 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) 4272 { 4273 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4274 nla_total_size(dev->addr_len) + /* NDA_LLADDR */ 4275 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4276 0; 4277 } 4278 4279 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4280 u16 ndm_state) 4281 { 4282 struct net *net = dev_net(dev); 4283 struct sk_buff *skb; 4284 int err = -ENOBUFS; 4285 4286 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); 4287 if (!skb) 4288 goto errout; 4289 4290 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4291 0, 0, type, NTF_SELF, 0, ndm_state); 4292 if (err < 0) { 4293 kfree_skb(skb); 4294 goto errout; 4295 } 4296 4297 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4298 return; 4299 errout: 4300 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4301 } 4302 4303 /* 4304 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4305 */ 4306 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4307 struct nlattr *tb[], 4308 struct net_device *dev, 4309 const unsigned char *addr, u16 vid, 4310 u16 flags) 4311 { 4312 int err = -EINVAL; 4313 4314 /* If aging addresses are supported, the device will need to 4315 * implement its own handler for this.
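* A minimal illustration of the userspace request this backs (the
* iproute2 command is an example, not taken from this file):
*
*   bridge fdb add 00:11:22:33:44:55 dev eth0 self permanent
*
* which lands here whenever the driver supplies no ndo_fdb_add of its
* own.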
4316 */ 4317 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4318 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4319 return err; 4320 } 4321 4322 if (tb[NDA_FLAGS_EXT]) { 4323 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4324 return err; 4325 } 4326 4327 if (vid) { 4328 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4329 return err; 4330 } 4331 4332 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4333 err = dev_uc_add_excl(dev, addr); 4334 else if (is_multicast_ether_addr(addr)) 4335 err = dev_mc_add_excl(dev, addr); 4336 4337 /* Only return duplicate errors if NLM_F_EXCL is set */ 4338 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4339 err = 0; 4340 4341 return err; 4342 } 4343 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4344 4345 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4346 struct netlink_ext_ack *extack) 4347 { 4348 u16 vid = 0; 4349 4350 if (vlan_attr) { 4351 if (nla_len(vlan_attr) != sizeof(u16)) { 4352 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4353 return -EINVAL; 4354 } 4355 4356 vid = nla_get_u16(vlan_attr); 4357 4358 if (!vid || vid >= VLAN_VID_MASK) { 4359 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4360 return -EINVAL; 4361 } 4362 } 4363 *p_vid = vid; 4364 return 0; 4365 } 4366 4367 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4368 struct netlink_ext_ack *extack) 4369 { 4370 struct net *net = sock_net(skb->sk); 4371 struct ndmsg *ndm; 4372 struct nlattr *tb[NDA_MAX+1]; 4373 struct net_device *dev; 4374 u8 *addr; 4375 u16 vid; 4376 int err; 4377 4378 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4379 extack); 4380 if (err < 0) 4381 return err; 4382 4383 ndm = nlmsg_data(nlh); 4384 if (ndm->ndm_ifindex == 0) { 4385 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4386 return -EINVAL; 4387 } 4388 4389 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4390 if (dev == NULL) { 4391 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4392 return -ENODEV; 4393 } 4394 4395 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4396 NL_SET_ERR_MSG(extack, "invalid address"); 4397 return -EINVAL; 4398 } 4399 4400 if (dev->type != ARPHRD_ETHER) { 4401 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4402 return -EINVAL; 4403 } 4404 4405 addr = nla_data(tb[NDA_LLADDR]); 4406 4407 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4408 if (err) 4409 return err; 4410 4411 err = -EOPNOTSUPP; 4412 4413 /* Support fdb on master device, the net/bridge default case */ 4414 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4415 netif_is_bridge_port(dev)) { 4416 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4417 const struct net_device_ops *ops = br_dev->netdev_ops; 4418 4419 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, 4420 nlh->nlmsg_flags, extack); 4421 if (err) 4422 goto out; 4423 else 4424 ndm->ndm_flags &= ~NTF_MASTER; 4425 } 4426 4427 /* Embedded bridge, macvlan, and any other device support */ 4428 if ((ndm->ndm_flags & NTF_SELF)) { 4429 if (dev->netdev_ops->ndo_fdb_add) 4430 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4431 vid, 4432 nlh->nlmsg_flags, 4433 extack); 4434 else 4435 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4436 nlh->nlmsg_flags); 4437 4438 if (!err) { 4439 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4440 ndm->ndm_state); 4441 ndm->ndm_flags &= ~NTF_SELF; 4442 } 4443 } 4444 out: 4445 return err; 4446 } 4447 4448 /* 4449 * ndo_dflt_fdb_del - default
netdevice operation to delete an FDB entry 4450 */ 4451 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4452 struct nlattr *tb[], 4453 struct net_device *dev, 4454 const unsigned char *addr, u16 vid) 4455 { 4456 int err = -EINVAL; 4457 4458 /* If aging addresses are supported, the device will need to 4459 * implement its own handler for this. 4460 */ 4461 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4462 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4463 return err; 4464 } 4465 4466 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4467 err = dev_uc_del(dev, addr); 4468 else if (is_multicast_ether_addr(addr)) 4469 err = dev_mc_del(dev, addr); 4470 4471 return err; 4472 } 4473 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4474 4475 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4476 struct netlink_ext_ack *extack) 4477 { 4478 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4479 struct net *net = sock_net(skb->sk); 4480 const struct net_device_ops *ops; 4481 struct ndmsg *ndm; 4482 struct nlattr *tb[NDA_MAX+1]; 4483 struct net_device *dev; 4484 __u8 *addr = NULL; 4485 int err; 4486 u16 vid; 4487 4488 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4489 return -EPERM; 4490 4491 if (!del_bulk) { 4492 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4493 NULL, extack); 4494 } else { 4495 /* For bulk delete, the drivers will parse the message with 4496 * their own policy. 4497 */ 4498 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); 4499 } 4500 if (err < 0) 4501 return err; 4502 4503 ndm = nlmsg_data(nlh); 4504 if (ndm->ndm_ifindex == 0) { 4505 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4506 return -EINVAL; 4507 } 4508 4509 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4510 if (dev == NULL) { 4511 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4512 return -ENODEV; 4513 } 4514 4515 if (!del_bulk) { 4516 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4517 NL_SET_ERR_MSG(extack, "invalid address"); 4518 return -EINVAL; 4519 } 4520 addr = nla_data(tb[NDA_LLADDR]); 4521 4522 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4523 if (err) 4524 return err; 4525 } 4526 4527 if (dev->type != ARPHRD_ETHER) { 4528 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4529 return -EINVAL; 4530 } 4531 4532 err = -EOPNOTSUPP; 4533 4534 /* Support fdb on master device, the net/bridge default case */ 4535 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4536 netif_is_bridge_port(dev)) { 4537 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4538 4539 ops = br_dev->netdev_ops; 4540 if (!del_bulk) { 4541 if (ops->ndo_fdb_del) 4542 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4543 } else { 4544 if (ops->ndo_fdb_del_bulk) 4545 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4546 } 4547 4548 if (err) 4549 goto out; 4550 else 4551 ndm->ndm_flags &= ~NTF_MASTER; 4552 } 4553 4554 /* Embedded bridge, macvlan, and any other device support */ 4555 if (ndm->ndm_flags & NTF_SELF) { 4556 ops = dev->netdev_ops; 4557 if (!del_bulk) { 4558 if (ops->ndo_fdb_del) 4559 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4560 else 4561 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4562 } else { 4563 /* in case err was cleared by NTF_MASTER call */ 4564 err = -EOPNOTSUPP; 4565 if (ops->ndo_fdb_del_bulk) 4566 err = ops->ndo_fdb_del_bulk(nlh, dev, extack); 4567 } 4568 4569 if (!err) { 4570 if (!del_bulk) 4571 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4572 ndm->ndm_state); 4573 ndm->ndm_flags &= ~NTF_SELF;
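/* NTF_SELF now handled, mirroring the NTF_MASTER bookkeeping above */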
4574 } 4575 } 4576 out: 4577 return err; 4578 } 4579 4580 static int nlmsg_populate_fdb(struct sk_buff *skb, 4581 struct netlink_callback *cb, 4582 struct net_device *dev, 4583 int *idx, 4584 struct netdev_hw_addr_list *list) 4585 { 4586 struct netdev_hw_addr *ha; 4587 int err; 4588 u32 portid, seq; 4589 4590 portid = NETLINK_CB(cb->skb).portid; 4591 seq = cb->nlh->nlmsg_seq; 4592 4593 list_for_each_entry(ha, &list->list, list) { 4594 if (*idx < cb->args[2]) 4595 goto skip; 4596 4597 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4598 portid, seq, 4599 RTM_NEWNEIGH, NTF_SELF, 4600 NLM_F_MULTI, NUD_PERMANENT); 4601 if (err < 0) 4602 return err; 4603 skip: 4604 *idx += 1; 4605 } 4606 return 0; 4607 } 4608 4609 /** 4610 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4611 * @skb: socket buffer to store message in 4612 * @cb: netlink callback 4613 * @dev: netdevice 4614 * @filter_dev: ignored 4615 * @idx: the number of FDB table entries dumped is added to *@idx 4616 * 4617 * Default netdevice operation to dump the existing unicast address list. 4618 * Returns number of addresses from list put in skb. 4619 */ 4620 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4621 struct netlink_callback *cb, 4622 struct net_device *dev, 4623 struct net_device *filter_dev, 4624 int *idx) 4625 { 4626 int err; 4627 4628 if (dev->type != ARPHRD_ETHER) 4629 return -EINVAL; 4630 4631 netif_addr_lock_bh(dev); 4632 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4633 if (err) 4634 goto out; 4635 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4636 out: 4637 netif_addr_unlock_bh(dev); 4638 return err; 4639 } 4640 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4641 4642 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4643 int *br_idx, int *brport_idx, 4644 struct netlink_ext_ack *extack) 4645 { 4646 struct nlattr *tb[NDA_MAX + 1]; 4647 struct ndmsg *ndm; 4648 int err, i; 4649 4650 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4651 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4652 return -EINVAL; 4653 } 4654 4655 ndm = nlmsg_data(nlh); 4656 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4657 ndm->ndm_flags || ndm->ndm_type) { 4658 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4659 return -EINVAL; 4660 } 4661 4662 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4663 NDA_MAX, NULL, extack); 4664 if (err < 0) 4665 return err; 4666 4667 *brport_idx = ndm->ndm_ifindex; 4668 for (i = 0; i <= NDA_MAX; ++i) { 4669 if (!tb[i]) 4670 continue; 4671 4672 switch (i) { 4673 case NDA_IFINDEX: 4674 if (nla_len(tb[i]) != sizeof(u32)) { 4675 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4676 return -EINVAL; 4677 } 4678 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4679 break; 4680 case NDA_MASTER: 4681 if (nla_len(tb[i]) != sizeof(u32)) { 4682 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4683 return -EINVAL; 4684 } 4685 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4686 break; 4687 default: 4688 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4689 return -EINVAL; 4690 } 4691 } 4692 4693 return 0; 4694 } 4695 4696 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4697 int *br_idx, int *brport_idx, 4698 struct netlink_ext_ack *extack) 4699 { 4700 struct nlattr *tb[IFLA_MAX+1]; 4701 int err; 4702 4703 /* A hack to preserve kernel<->userspace interface. 4704 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 
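* (Concretely: struct ndmsg is 12 bytes, or 20 with one u32 attribute,
* while struct ifinfomsg is 16, so nlmsg_len alone disambiguates.)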
4705 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4706 * So, check for ndmsg with an optional u32 attribute (not used here). 4707 * Fortunately these sizes don't conflict with the size of ifinfomsg 4708 * with an optional attribute. 4709 */ 4710 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4711 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4712 nla_attr_size(sizeof(u32)))) { 4713 struct ifinfomsg *ifm; 4714 4715 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4716 tb, IFLA_MAX, ifla_policy, 4717 extack); 4718 if (err < 0) { 4719 return -EINVAL; 4720 } else if (err == 0) { 4721 if (tb[IFLA_MASTER]) 4722 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4723 } 4724 4725 ifm = nlmsg_data(nlh); 4726 *brport_idx = ifm->ifi_index; 4727 } 4728 return 0; 4729 } 4730 4731 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4732 { 4733 struct net_device *dev; 4734 struct net_device *br_dev = NULL; 4735 const struct net_device_ops *ops = NULL; 4736 const struct net_device_ops *cops = NULL; 4737 struct net *net = sock_net(skb->sk); 4738 struct hlist_head *head; 4739 int brport_idx = 0; 4740 int br_idx = 0; 4741 int h, s_h; 4742 int idx = 0, s_idx; 4743 int err = 0; 4744 int fidx = 0; 4745 4746 if (cb->strict_check) 4747 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4748 cb->extack); 4749 else 4750 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4751 cb->extack); 4752 if (err < 0) 4753 return err; 4754 4755 if (br_idx) { 4756 br_dev = __dev_get_by_index(net, br_idx); 4757 if (!br_dev) 4758 return -ENODEV; 4759 4760 ops = br_dev->netdev_ops; 4761 } 4762 4763 s_h = cb->args[0]; 4764 s_idx = cb->args[1]; 4765 4766 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4767 idx = 0; 4768 head = &net->dev_index_head[h]; 4769 hlist_for_each_entry(dev, head, index_hlist) { 4770 4771 if (brport_idx && (dev->ifindex != brport_idx)) 4772 continue; 4773 4774 if (!br_idx) { /* user did not specify a specific bridge */ 4775 if (netif_is_bridge_port(dev)) { 4776 br_dev = netdev_master_upper_dev_get(dev); 4777 cops = br_dev->netdev_ops; 4778 } 4779 } else { 4780 if (dev != br_dev && 4781 !netif_is_bridge_port(dev)) 4782 continue; 4783 4784 if (br_dev != netdev_master_upper_dev_get(dev) && 4785 !netif_is_bridge_master(dev)) 4786 continue; 4787 cops = ops; 4788 } 4789 4790 if (idx < s_idx) 4791 goto cont; 4792 4793 if (netif_is_bridge_port(dev)) { 4794 if (cops && cops->ndo_fdb_dump) { 4795 err = cops->ndo_fdb_dump(skb, cb, 4796 br_dev, dev, 4797 &fidx); 4798 if (err == -EMSGSIZE) 4799 goto out; 4800 } 4801 } 4802 4803 if (dev->netdev_ops->ndo_fdb_dump) 4804 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4805 dev, NULL, 4806 &fidx); 4807 else 4808 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4809 &fidx); 4810 if (err == -EMSGSIZE) 4811 goto out; 4812 4813 cops = NULL; 4814 4815 /* reset fdb offset to 0 for rest of the interfaces */ 4816 cb->args[2] = 0; 4817 fidx = 0; 4818 cont: 4819 idx++; 4820 } 4821 } 4822 4823 out: 4824 cb->args[0] = h; 4825 cb->args[1] = idx; 4826 cb->args[2] = fidx; 4827 4828 return skb->len; 4829 } 4830 4831 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4832 struct nlattr **tb, u8 *ndm_flags, 4833 int *br_idx, int *brport_idx, u8 **addr, 4834 u16 *vid, struct netlink_ext_ack *extack) 4835 { 4836 struct ndmsg *ndm; 4837 int err, i; 4838 4839 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4840 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4841 return -EINVAL; 4842 } 4843 4844 ndm = 
nlmsg_data(nlh); 4845 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4846 ndm->ndm_type) { 4847 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4848 return -EINVAL; 4849 } 4850 4851 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4852 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4853 return -EINVAL; 4854 } 4855 4856 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4857 NDA_MAX, nda_policy, extack); 4858 if (err < 0) 4859 return err; 4860 4861 *ndm_flags = ndm->ndm_flags; 4862 *brport_idx = ndm->ndm_ifindex; 4863 for (i = 0; i <= NDA_MAX; ++i) { 4864 if (!tb[i]) 4865 continue; 4866 4867 switch (i) { 4868 case NDA_MASTER: 4869 *br_idx = nla_get_u32(tb[i]); 4870 break; 4871 case NDA_LLADDR: 4872 if (nla_len(tb[i]) != ETH_ALEN) { 4873 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4874 return -EINVAL; 4875 } 4876 *addr = nla_data(tb[i]); 4877 break; 4878 case NDA_VLAN: 4879 err = fdb_vid_parse(tb[i], vid, extack); 4880 if (err) 4881 return err; 4882 break; 4883 case NDA_VNI: 4884 break; 4885 default: 4886 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4887 return -EINVAL; 4888 } 4889 } 4890 4891 return 0; 4892 } 4893 4894 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4895 struct netlink_ext_ack *extack) 4896 { 4897 struct net_device *dev = NULL, *br_dev = NULL; 4898 const struct net_device_ops *ops = NULL; 4899 struct net *net = sock_net(in_skb->sk); 4900 struct nlattr *tb[NDA_MAX + 1]; 4901 struct sk_buff *skb; 4902 int brport_idx = 0; 4903 u8 ndm_flags = 0; 4904 int br_idx = 0; 4905 u8 *addr = NULL; 4906 u16 vid = 0; 4907 int err; 4908 4909 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4910 &brport_idx, &addr, &vid, extack); 4911 if (err < 0) 4912 return err; 4913 4914 if (!addr) { 4915 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4916 return -EINVAL; 4917 } 4918 4919 if (brport_idx) { 4920 dev = __dev_get_by_index(net, brport_idx); 4921 if (!dev) { 4922 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4923 return -ENODEV; 4924 } 4925 } 4926 4927 if (br_idx) { 4928 if (dev) { 4929 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4930 return -EINVAL; 4931 } 4932 4933 br_dev = __dev_get_by_index(net, br_idx); 4934 if (!br_dev) { 4935 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4936 return -EINVAL; 4937 } 4938 ops = br_dev->netdev_ops; 4939 } 4940 4941 if (dev) { 4942 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4943 if (!netif_is_bridge_port(dev)) { 4944 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4945 return -EINVAL; 4946 } 4947 br_dev = netdev_master_upper_dev_get(dev); 4948 if (!br_dev) { 4949 NL_SET_ERR_MSG(extack, "Master of device not found"); 4950 return -EINVAL; 4951 } 4952 ops = br_dev->netdev_ops; 4953 } else { 4954 if (!(ndm_flags & NTF_SELF)) { 4955 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4956 return -EINVAL; 4957 } 4958 ops = dev->netdev_ops; 4959 } 4960 } 4961 4962 if (!br_dev && !dev) { 4963 NL_SET_ERR_MSG(extack, "No device specified"); 4964 return -ENODEV; 4965 } 4966 4967 if (!ops || !ops->ndo_fdb_get) { 4968 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4969 return -EOPNOTSUPP; 4970 } 4971 4972 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4973 if (!skb) 4974 return -ENOBUFS; 4975 4976 if (br_dev) 4977 dev = br_dev; 4978 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4979 NETLINK_CB(in_skb).portid, 4980 nlh->nlmsg_seq, extack); 4981 if 
(err) 4982 goto out; 4983 4984 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4985 out: 4986 kfree_skb(skb); 4987 return err; 4988 } 4989 4990 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4991 unsigned int attrnum, unsigned int flag) 4992 { 4993 if (mask & flag) 4994 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4995 return 0; 4996 } 4997 4998 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4999 struct net_device *dev, u16 mode, 5000 u32 flags, u32 mask, int nlflags, 5001 u32 filter_mask, 5002 int (*vlan_fill)(struct sk_buff *skb, 5003 struct net_device *dev, 5004 u32 filter_mask)) 5005 { 5006 struct nlmsghdr *nlh; 5007 struct ifinfomsg *ifm; 5008 struct nlattr *br_afspec; 5009 struct nlattr *protinfo; 5010 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 5011 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5012 int err = 0; 5013 5014 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 5015 if (nlh == NULL) 5016 return -EMSGSIZE; 5017 5018 ifm = nlmsg_data(nlh); 5019 ifm->ifi_family = AF_BRIDGE; 5020 ifm->__ifi_pad = 0; 5021 ifm->ifi_type = dev->type; 5022 ifm->ifi_index = dev->ifindex; 5023 ifm->ifi_flags = dev_get_flags(dev); 5024 ifm->ifi_change = 0; 5025 5026 5027 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 5028 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 5029 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 5030 (br_dev && 5031 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 5032 (dev->addr_len && 5033 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 5034 (dev->ifindex != dev_get_iflink(dev) && 5035 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 5036 goto nla_put_failure; 5037 5038 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 5039 if (!br_afspec) 5040 goto nla_put_failure; 5041 5042 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 5043 nla_nest_cancel(skb, br_afspec); 5044 goto nla_put_failure; 5045 } 5046 5047 if (mode != BRIDGE_MODE_UNDEF) { 5048 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 5049 nla_nest_cancel(skb, br_afspec); 5050 goto nla_put_failure; 5051 } 5052 } 5053 if (vlan_fill) { 5054 err = vlan_fill(skb, dev, filter_mask); 5055 if (err) { 5056 nla_nest_cancel(skb, br_afspec); 5057 goto nla_put_failure; 5058 } 5059 } 5060 nla_nest_end(skb, br_afspec); 5061 5062 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 5063 if (!protinfo) 5064 goto nla_put_failure; 5065 5066 if (brport_nla_put_flag(skb, flags, mask, 5067 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 5068 brport_nla_put_flag(skb, flags, mask, 5069 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 5070 brport_nla_put_flag(skb, flags, mask, 5071 IFLA_BRPORT_FAST_LEAVE, 5072 BR_MULTICAST_FAST_LEAVE) || 5073 brport_nla_put_flag(skb, flags, mask, 5074 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 5075 brport_nla_put_flag(skb, flags, mask, 5076 IFLA_BRPORT_LEARNING, BR_LEARNING) || 5077 brport_nla_put_flag(skb, flags, mask, 5078 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 5079 brport_nla_put_flag(skb, flags, mask, 5080 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 5081 brport_nla_put_flag(skb, flags, mask, 5082 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 5083 brport_nla_put_flag(skb, flags, mask, 5084 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 5085 brport_nla_put_flag(skb, flags, mask, 5086 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 5087 nla_nest_cancel(skb, protinfo); 5088 goto nla_put_failure; 5089 } 5090 5091 nla_nest_end(skb, protinfo); 5092 5093 nlmsg_end(skb, nlh); 5094 return 0; 5095 
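/* a failed nla_put above lands here and unwinds the whole message */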
nla_put_failure: 5096 nlmsg_cancel(skb, nlh); 5097 return err ? err : -EMSGSIZE; 5098 } 5099 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 5100 5101 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 5102 bool strict_check, u32 *filter_mask, 5103 struct netlink_ext_ack *extack) 5104 { 5105 struct nlattr *tb[IFLA_MAX+1]; 5106 int err, i; 5107 5108 if (strict_check) { 5109 struct ifinfomsg *ifm; 5110 5111 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 5112 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 5113 return -EINVAL; 5114 } 5115 5116 ifm = nlmsg_data(nlh); 5117 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 5118 ifm->ifi_change || ifm->ifi_index) { 5119 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 5120 return -EINVAL; 5121 } 5122 5123 err = nlmsg_parse_deprecated_strict(nlh, 5124 sizeof(struct ifinfomsg), 5125 tb, IFLA_MAX, ifla_policy, 5126 extack); 5127 } else { 5128 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 5129 tb, IFLA_MAX, ifla_policy, 5130 extack); 5131 } 5132 if (err < 0) 5133 return err; 5134 5135 /* new attributes should only be added with strict checking */ 5136 for (i = 0; i <= IFLA_MAX; ++i) { 5137 if (!tb[i]) 5138 continue; 5139 5140 switch (i) { 5141 case IFLA_EXT_MASK: 5142 *filter_mask = nla_get_u32(tb[i]); 5143 break; 5144 default: 5145 if (strict_check) { 5146 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 5147 return -EINVAL; 5148 } 5149 } 5150 } 5151 5152 return 0; 5153 } 5154 5155 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 5156 { 5157 const struct nlmsghdr *nlh = cb->nlh; 5158 struct net *net = sock_net(skb->sk); 5159 struct net_device *dev; 5160 int idx = 0; 5161 u32 portid = NETLINK_CB(cb->skb).portid; 5162 u32 seq = nlh->nlmsg_seq; 5163 u32 filter_mask = 0; 5164 int err; 5165 5166 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 5167 cb->extack); 5168 if (err < 0 && cb->strict_check) 5169 return err; 5170 5171 rcu_read_lock(); 5172 for_each_netdev_rcu(net, dev) { 5173 const struct net_device_ops *ops = dev->netdev_ops; 5174 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5175 5176 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 5177 if (idx >= cb->args[0]) { 5178 err = br_dev->netdev_ops->ndo_bridge_getlink( 5179 skb, portid, seq, dev, 5180 filter_mask, NLM_F_MULTI); 5181 if (err < 0 && err != -EOPNOTSUPP) { 5182 if (likely(skb->len)) 5183 break; 5184 5185 goto out_err; 5186 } 5187 } 5188 idx++; 5189 } 5190 5191 if (ops->ndo_bridge_getlink) { 5192 if (idx >= cb->args[0]) { 5193 err = ops->ndo_bridge_getlink(skb, portid, 5194 seq, dev, 5195 filter_mask, 5196 NLM_F_MULTI); 5197 if (err < 0 && err != -EOPNOTSUPP) { 5198 if (likely(skb->len)) 5199 break; 5200 5201 goto out_err; 5202 } 5203 } 5204 idx++; 5205 } 5206 } 5207 err = skb->len; 5208 out_err: 5209 rcu_read_unlock(); 5210 cb->args[0] = idx; 5211 5212 return err; 5213 } 5214 5215 static inline size_t bridge_nlmsg_size(void) 5216 { 5217 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5218 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5219 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5220 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 5221 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5222 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5223 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5224 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5225 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 
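/* nested below IFLA_AF_SPEC: */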
5226 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5227 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5228 } 5229 5230 static int rtnl_bridge_notify(struct net_device *dev) 5231 { 5232 struct net *net = dev_net(dev); 5233 struct sk_buff *skb; 5234 int err = -EOPNOTSUPP; 5235 5236 if (!dev->netdev_ops->ndo_bridge_getlink) 5237 return 0; 5238 5239 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5240 if (!skb) { 5241 err = -ENOMEM; 5242 goto errout; 5243 } 5244 5245 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5246 if (err < 0) 5247 goto errout; 5248 5249 /* Notification info is only filled for bridge ports, not the bridge 5250 * device itself. Therefore, a zero notification length is valid and 5251 * should not result in an error. 5252 */ 5253 if (!skb->len) 5254 goto errout; 5255 5256 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5257 return 0; 5258 errout: 5259 WARN_ON(err == -EMSGSIZE); 5260 kfree_skb(skb); 5261 if (err) 5262 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5263 return err; 5264 } 5265 5266 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5267 struct netlink_ext_ack *extack) 5268 { 5269 struct net *net = sock_net(skb->sk); 5270 struct ifinfomsg *ifm; 5271 struct net_device *dev; 5272 struct nlattr *br_spec, *attr, *br_flags_attr = NULL; 5273 int rem, err = -EOPNOTSUPP; 5274 u16 flags = 0; 5275 5276 if (nlmsg_len(nlh) < sizeof(*ifm)) 5277 return -EINVAL; 5278 5279 ifm = nlmsg_data(nlh); 5280 if (ifm->ifi_family != AF_BRIDGE) 5281 return -EPFNOSUPPORT; 5282 5283 dev = __dev_get_by_index(net, ifm->ifi_index); 5284 if (!dev) { 5285 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5286 return -ENODEV; 5287 } 5288 5289 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5290 if (br_spec) { 5291 nla_for_each_nested(attr, br_spec, rem) { 5292 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { 5293 if (nla_len(attr) < sizeof(flags)) 5294 return -EINVAL; 5295 5296 br_flags_attr = attr; 5297 flags = nla_get_u16(attr); 5298 } 5299 5300 if (nla_type(attr) == IFLA_BRIDGE_MODE) { 5301 if (nla_len(attr) < sizeof(u16)) 5302 return -EINVAL; 5303 } 5304 } 5305 } 5306 5307 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5308 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5309 5310 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5311 err = -EOPNOTSUPP; 5312 goto out; 5313 } 5314 5315 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5316 extack); 5317 if (err) 5318 goto out; 5319 5320 flags &= ~BRIDGE_FLAGS_MASTER; 5321 } 5322 5323 if ((flags & BRIDGE_FLAGS_SELF)) { 5324 if (!dev->netdev_ops->ndo_bridge_setlink) 5325 err = -EOPNOTSUPP; 5326 else 5327 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5328 flags, 5329 extack); 5330 if (!err) { 5331 flags &= ~BRIDGE_FLAGS_SELF; 5332 5333 /* Generate event to notify upper layer of bridge 5334 * change 5335 */ 5336 err = rtnl_bridge_notify(dev); 5337 } 5338 } 5339 5340 if (br_flags_attr) 5341 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); 5342 out: 5343 return err; 5344 } 5345 5346 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5347 struct netlink_ext_ack *extack) 5348 { 5349 struct net *net = sock_net(skb->sk); 5350 struct ifinfomsg *ifm; 5351 struct net_device *dev; 5352 struct nlattr *br_spec, *attr = NULL; 5353 int rem, err = -EOPNOTSUPP; 5354 u16 flags = 0; 5355 bool have_flags = false; 5356 5357 if (nlmsg_len(nlh) < sizeof(*ifm)) 5358 return -EINVAL; 5359 5360 ifm = 
nlmsg_data(nlh); 5361 if (ifm->ifi_family != AF_BRIDGE) 5362 return -EPFNOSUPPORT; 5363 5364 dev = __dev_get_by_index(net, ifm->ifi_index); 5365 if (!dev) { 5366 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5367 return -ENODEV; 5368 } 5369 5370 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5371 if (br_spec) { 5372 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec, 5373 rem) { 5374 if (nla_len(attr) < sizeof(flags)) 5375 return -EINVAL; 5376 5377 have_flags = true; 5378 flags = nla_get_u16(attr); 5379 break; 5380 } 5381 } 5382 5383 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5384 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5385 5386 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5387 err = -EOPNOTSUPP; 5388 goto out; 5389 } 5390 5391 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5392 if (err) 5393 goto out; 5394 5395 flags &= ~BRIDGE_FLAGS_MASTER; 5396 } 5397 5398 if ((flags & BRIDGE_FLAGS_SELF)) { 5399 if (!dev->netdev_ops->ndo_bridge_dellink) 5400 err = -EOPNOTSUPP; 5401 else 5402 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5403 flags); 5404 5405 if (!err) { 5406 flags &= ~BRIDGE_FLAGS_SELF; 5407 5408 /* Generate event to notify upper layer of bridge 5409 * change 5410 */ 5411 err = rtnl_bridge_notify(dev); 5412 } 5413 } 5414 5415 if (have_flags) 5416 memcpy(nla_data(attr), &flags, sizeof(flags)); 5417 out: 5418 return err; 5419 } 5420 5421 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5422 { 5423 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5424 (!idxattr || idxattr == attrid); 5425 } 5426 5427 static bool 5428 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5429 { 5430 return dev->netdev_ops && 5431 dev->netdev_ops->ndo_has_offload_stats && 5432 dev->netdev_ops->ndo_get_offload_stats && 5433 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5434 } 5435 5436 static unsigned int 5437 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5438 { 5439 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5440 sizeof(struct rtnl_link_stats64) : 0; 5441 } 5442 5443 static int 5444 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5445 struct sk_buff *skb) 5446 { 5447 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5448 struct nlattr *attr = NULL; 5449 void *attr_data; 5450 int err; 5451 5452 if (!size) 5453 return -ENODATA; 5454 5455 attr = nla_reserve_64bit(skb, attr_id, size, 5456 IFLA_OFFLOAD_XSTATS_UNSPEC); 5457 if (!attr) 5458 return -EMSGSIZE; 5459 5460 attr_data = nla_data(attr); 5461 memset(attr_data, 0, size); 5462 5463 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5464 if (err) 5465 return err; 5466 5467 return 0; 5468 } 5469 5470 static unsigned int 5471 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5472 enum netdev_offload_xstats_type type) 5473 { 5474 bool enabled = netdev_offload_xstats_enabled(dev, type); 5475 5476 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5477 } 5478 5479 struct rtnl_offload_xstats_request_used { 5480 bool request; 5481 bool used; 5482 }; 5483 5484 static int 5485 rtnl_offload_xstats_get_stats(struct net_device *dev, 5486 enum netdev_offload_xstats_type type, 5487 struct rtnl_offload_xstats_request_used *ru, 5488 struct rtnl_hw_stats64 *stats, 5489 struct netlink_ext_ack *extack) 5490 { 5491 bool request; 5492 bool used; 5493 int err; 5494 5495 request = netdev_offload_xstats_enabled(dev, type); 5496 if (!request) { 5497 used = false; 5498 goto out; 5499 } 5500 5501 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5502 if (err) 5503 return err; 5504 5505 out: 5506 if (ru) { 5507 ru->request = request; 5508 ru->used = used; 5509 } 5510 return 0; 5511 } 5512 5513 static int 5514 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5515 struct rtnl_offload_xstats_request_used *ru) 5516 { 5517 struct nlattr *nest; 5518 5519 nest = nla_nest_start(skb, attr_id); 5520 if (!nest) 5521 return -EMSGSIZE; 5522 5523 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5524 goto nla_put_failure; 5525 5526 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5527 goto nla_put_failure; 5528 5529 nla_nest_end(skb, nest); 5530 return 0; 5531 5532 nla_put_failure: 5533 nla_nest_cancel(skb, nest); 5534 return -EMSGSIZE; 5535 } 5536 5537 static int 5538 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5539 struct netlink_ext_ack *extack) 5540 { 5541 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5542 struct rtnl_offload_xstats_request_used ru_l3; 5543 struct nlattr *nest; 5544 int err; 5545 5546 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5547 if (err) 5548 return err; 5549 5550 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5551 if (!nest) 5552 return -EMSGSIZE; 5553 5554 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5555 IFLA_OFFLOAD_XSTATS_L3_STATS, 5556 &ru_l3)) 5557 goto nla_put_failure; 5558 5559 nla_nest_end(skb, nest); 5560 return 0; 5561 5562 nla_put_failure: 5563 nla_nest_cancel(skb, nest); 5564 return -EMSGSIZE; 5565 } 5566 5567 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5568 int *prividx, u32 off_filter_mask, 5569 struct netlink_ext_ack *extack) 5570 { 5571 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5572 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5573 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5574 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5575 bool have_data = false; 5576 int err; 5577 5578 if (*prividx <= attr_id_cpu_hit && 5579 (off_filter_mask & 5580 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5581 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5582 if (!err) { 5583 have_data = true; 5584 } else if (err != -ENODATA) { 5585 *prividx = attr_id_cpu_hit; 5586 return err; 5587 } 5588 } 5589 5590 if (*prividx <= attr_id_hw_s_info && 5591 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5592 *prividx = attr_id_hw_s_info; 5593 5594 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5595 if (err) 5596 return err; 5597 5598 have_data = true; 5599 *prividx = 0; 5600 } 5601 5602 if (*prividx <= attr_id_l3_stats && 5603 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5604 unsigned int size_l3; 5605 struct nlattr *attr; 5606 5607 *prividx = attr_id_l3_stats; 5608 5609 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5610 if (!size_l3) 5611 goto skip_l3_stats; 5612 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5613 IFLA_OFFLOAD_XSTATS_UNSPEC); 5614 if (!attr) 5615 return -EMSGSIZE; 5616 5617 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5618 nla_data(attr), extack); 5619 if (err) 5620 return err; 5621 5622 have_data = true; 5623 skip_l3_stats: 5624 *prividx = 0; 5625 } 5626 5627 if (!have_data) 5628 return -ENODATA; 5629 5630 *prividx = 0; 5631 return 0; 5632 } 5633 5634 static unsigned int 5635 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5636 enum netdev_offload_xstats_type type) 5637 { 5638 return nla_total_size(0) + 5639 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5640 nla_total_size(sizeof(u8)) + 5641 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5642 nla_total_size(sizeof(u8)) + 5643 0; 5644 } 5645 5646 static unsigned int 5647 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5648 { 5649 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5650 5651 return nla_total_size(0) + 5652 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5653 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5654 0; 5655 } 5656 5657 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5658 u32 off_filter_mask) 5659 { 5660 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5661 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5662 int nla_size = 0; 5663 int size; 5664 5665 if (off_filter_mask & 5666 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5667 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5668 nla_size += nla_total_size_64bit(size); 5669 } 5670 5671 if (off_filter_mask & 5672 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5673 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5674 5675 if (off_filter_mask & 5676 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5677 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5678 nla_size += nla_total_size_64bit(size); 5679 } 5680 5681 if (nla_size != 0) 5682 nla_size += nla_total_size(0); 5683 5684 return nla_size; 5685 } 5686 5687 struct rtnl_stats_dump_filters { 5688 /* mask[0] filters outer attributes. Then individual nests have their 5689 * filtering mask at the index of the nested attribute. 
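* e.g. mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] selects which
* IFLA_OFFLOAD_XSTATS_* attributes are filled inside that nest.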
5690 */ 5691 u32 mask[IFLA_STATS_MAX + 1]; 5692 }; 5693 5694 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5695 int type, u32 pid, u32 seq, u32 change, 5696 unsigned int flags, 5697 const struct rtnl_stats_dump_filters *filters, 5698 int *idxattr, int *prividx, 5699 struct netlink_ext_ack *extack) 5700 { 5701 unsigned int filter_mask = filters->mask[0]; 5702 struct if_stats_msg *ifsm; 5703 struct nlmsghdr *nlh; 5704 struct nlattr *attr; 5705 int s_prividx = *prividx; 5706 int err; 5707 5708 ASSERT_RTNL(); 5709 5710 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5711 if (!nlh) 5712 return -EMSGSIZE; 5713 5714 ifsm = nlmsg_data(nlh); 5715 ifsm->family = PF_UNSPEC; 5716 ifsm->pad1 = 0; 5717 ifsm->pad2 = 0; 5718 ifsm->ifindex = dev->ifindex; 5719 ifsm->filter_mask = filter_mask; 5720 5721 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5722 struct rtnl_link_stats64 *sp; 5723 5724 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5725 sizeof(struct rtnl_link_stats64), 5726 IFLA_STATS_UNSPEC); 5727 if (!attr) { 5728 err = -EMSGSIZE; 5729 goto nla_put_failure; 5730 } 5731 5732 sp = nla_data(attr); 5733 dev_get_stats(dev, sp); 5734 } 5735 5736 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5737 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5738 5739 if (ops && ops->fill_linkxstats) { 5740 *idxattr = IFLA_STATS_LINK_XSTATS; 5741 attr = nla_nest_start_noflag(skb, 5742 IFLA_STATS_LINK_XSTATS); 5743 if (!attr) { 5744 err = -EMSGSIZE; 5745 goto nla_put_failure; 5746 } 5747 5748 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5749 nla_nest_end(skb, attr); 5750 if (err) 5751 goto nla_put_failure; 5752 *idxattr = 0; 5753 } 5754 } 5755 5756 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5757 *idxattr)) { 5758 const struct rtnl_link_ops *ops = NULL; 5759 const struct net_device *master; 5760 5761 master = netdev_master_upper_dev_get(dev); 5762 if (master) 5763 ops = master->rtnl_link_ops; 5764 if (ops && ops->fill_linkxstats) { 5765 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5766 attr = nla_nest_start_noflag(skb, 5767 IFLA_STATS_LINK_XSTATS_SLAVE); 5768 if (!attr) { 5769 err = -EMSGSIZE; 5770 goto nla_put_failure; 5771 } 5772 5773 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5774 nla_nest_end(skb, attr); 5775 if (err) 5776 goto nla_put_failure; 5777 *idxattr = 0; 5778 } 5779 } 5780 5781 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5782 *idxattr)) { 5783 u32 off_filter_mask; 5784 5785 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5786 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5787 attr = nla_nest_start_noflag(skb, 5788 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5789 if (!attr) { 5790 err = -EMSGSIZE; 5791 goto nla_put_failure; 5792 } 5793 5794 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5795 off_filter_mask, extack); 5796 if (err == -ENODATA) 5797 nla_nest_cancel(skb, attr); 5798 else 5799 nla_nest_end(skb, attr); 5800 5801 if (err && err != -ENODATA) 5802 goto nla_put_failure; 5803 *idxattr = 0; 5804 } 5805 5806 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5807 struct rtnl_af_ops *af_ops; 5808 5809 *idxattr = IFLA_STATS_AF_SPEC; 5810 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5811 if (!attr) { 5812 err = -EMSGSIZE; 5813 goto nla_put_failure; 5814 } 5815 5816 rcu_read_lock(); 5817 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5818 if (af_ops->fill_stats_af) { 5819 struct nlattr *af; 5820 5821 af = 
nla_nest_start_noflag(skb, 5822 af_ops->family); 5823 if (!af) { 5824 rcu_read_unlock(); 5825 err = -EMSGSIZE; 5826 goto nla_put_failure; 5827 } 5828 err = af_ops->fill_stats_af(skb, dev); 5829 5830 if (err == -ENODATA) { 5831 nla_nest_cancel(skb, af); 5832 } else if (err < 0) { 5833 rcu_read_unlock(); 5834 goto nla_put_failure; 5835 } 5836 5837 nla_nest_end(skb, af); 5838 } 5839 } 5840 rcu_read_unlock(); 5841 5842 nla_nest_end(skb, attr); 5843 5844 *idxattr = 0; 5845 } 5846 5847 nlmsg_end(skb, nlh); 5848 5849 return 0; 5850 5851 nla_put_failure: 5852 /* not a multi message or no progress mean a real error */ 5853 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5854 nlmsg_cancel(skb, nlh); 5855 else 5856 nlmsg_end(skb, nlh); 5857 5858 return err; 5859 } 5860 5861 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5862 const struct rtnl_stats_dump_filters *filters) 5863 { 5864 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5865 unsigned int filter_mask = filters->mask[0]; 5866 5867 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5868 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5869 5870 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5871 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5872 int attr = IFLA_STATS_LINK_XSTATS; 5873 5874 if (ops && ops->get_linkxstats_size) { 5875 size += nla_total_size(ops->get_linkxstats_size(dev, 5876 attr)); 5877 /* for IFLA_STATS_LINK_XSTATS */ 5878 size += nla_total_size(0); 5879 } 5880 } 5881 5882 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5883 struct net_device *_dev = (struct net_device *)dev; 5884 const struct rtnl_link_ops *ops = NULL; 5885 const struct net_device *master; 5886 5887 /* netdev_master_upper_dev_get can't take const */ 5888 master = netdev_master_upper_dev_get(_dev); 5889 if (master) 5890 ops = master->rtnl_link_ops; 5891 if (ops && ops->get_linkxstats_size) { 5892 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5893 5894 size += nla_total_size(ops->get_linkxstats_size(dev, 5895 attr)); 5896 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5897 size += nla_total_size(0); 5898 } 5899 } 5900 5901 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5902 u32 off_filter_mask; 5903 5904 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5905 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5906 } 5907 5908 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5909 struct rtnl_af_ops *af_ops; 5910 5911 /* for IFLA_STATS_AF_SPEC */ 5912 size += nla_total_size(0); 5913 5914 rcu_read_lock(); 5915 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5916 if (af_ops->get_stats_af_size) { 5917 size += nla_total_size( 5918 af_ops->get_stats_af_size(dev)); 5919 5920 /* for AF_* */ 5921 size += nla_total_size(0); 5922 } 5923 } 5924 rcu_read_unlock(); 5925 } 5926 5927 return size; 5928 } 5929 5930 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5931 5932 static const struct nla_policy 5933 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5934 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5935 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5936 }; 5937 5938 static const struct nla_policy 5939 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5940 [IFLA_STATS_GET_FILTERS] = 5941 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5942 }; 5943 5944 static const struct nla_policy 5945 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5946 
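/* the only RTM_SETSTATS knob so far: toggle HW L3 stats collection */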
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5947 }; 5948 5949 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5950 struct rtnl_stats_dump_filters *filters, 5951 struct netlink_ext_ack *extack) 5952 { 5953 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5954 int err; 5955 int at; 5956 5957 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5958 rtnl_stats_get_policy_filters, extack); 5959 if (err < 0) 5960 return err; 5961 5962 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5963 if (tb[at]) { 5964 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5965 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5966 return -EINVAL; 5967 } 5968 filters->mask[at] = nla_get_u32(tb[at]); 5969 } 5970 } 5971 5972 return 0; 5973 } 5974 5975 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5976 u32 filter_mask, 5977 struct rtnl_stats_dump_filters *filters, 5978 struct netlink_ext_ack *extack) 5979 { 5980 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5981 int err; 5982 int i; 5983 5984 filters->mask[0] = filter_mask; 5985 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5986 filters->mask[i] = -1U; 5987 5988 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5989 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5990 if (err < 0) 5991 return err; 5992 5993 if (tb[IFLA_STATS_GET_FILTERS]) { 5994 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5995 filters, extack); 5996 if (err) 5997 return err; 5998 } 5999 6000 return 0; 6001 } 6002 6003 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 6004 bool is_dump, struct netlink_ext_ack *extack) 6005 { 6006 struct if_stats_msg *ifsm; 6007 6008 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 6009 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 6010 return -EINVAL; 6011 } 6012 6013 if (!strict_check) 6014 return 0; 6015 6016 ifsm = nlmsg_data(nlh); 6017 6018 /* only requests using strict checks can pass data to influence 6019 * the dump. The legacy exception is filter_mask. 
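* (i.e. the pad fields must be zero and, for dumps, ifindex unset;
* filter_mask predates strict validation and is still honoured.)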
6020 */ 6021 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 6022 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 6023 return -EINVAL; 6024 } 6025 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 6026 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 6027 return -EINVAL; 6028 } 6029 6030 return 0; 6031 } 6032 6033 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 6034 struct netlink_ext_ack *extack) 6035 { 6036 struct rtnl_stats_dump_filters filters; 6037 struct net *net = sock_net(skb->sk); 6038 struct net_device *dev = NULL; 6039 int idxattr = 0, prividx = 0; 6040 struct if_stats_msg *ifsm; 6041 struct sk_buff *nskb; 6042 int err; 6043 6044 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6045 false, extack); 6046 if (err) 6047 return err; 6048 6049 ifsm = nlmsg_data(nlh); 6050 if (ifsm->ifindex > 0) 6051 dev = __dev_get_by_index(net, ifsm->ifindex); 6052 else 6053 return -EINVAL; 6054 6055 if (!dev) 6056 return -ENODEV; 6057 6058 if (!ifsm->filter_mask) { 6059 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 6060 return -EINVAL; 6061 } 6062 6063 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 6064 if (err) 6065 return err; 6066 6067 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 6068 if (!nskb) 6069 return -ENOBUFS; 6070 6071 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 6072 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 6073 0, &filters, &idxattr, &prividx, extack); 6074 if (err < 0) { 6075 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 6076 WARN_ON(err == -EMSGSIZE); 6077 kfree_skb(nskb); 6078 } else { 6079 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 6080 } 6081 6082 return err; 6083 } 6084 6085 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 6086 { 6087 struct netlink_ext_ack *extack = cb->extack; 6088 struct rtnl_stats_dump_filters filters; 6089 struct net *net = sock_net(skb->sk); 6090 unsigned int flags = NLM_F_MULTI; 6091 struct if_stats_msg *ifsm; 6092 struct { 6093 unsigned long ifindex; 6094 int idxattr; 6095 int prividx; 6096 } *ctx = (void *)cb->ctx; 6097 struct net_device *dev; 6098 int err; 6099 6100 cb->seq = net->dev_base_seq; 6101 6102 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 6103 if (err) 6104 return err; 6105 6106 ifsm = nlmsg_data(cb->nlh); 6107 if (!ifsm->filter_mask) { 6108 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 6109 return -EINVAL; 6110 } 6111 6112 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 6113 extack); 6114 if (err) 6115 return err; 6116 6117 for_each_netdev_dump(net, dev, ctx->ifindex) { 6118 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 6119 NETLINK_CB(cb->skb).portid, 6120 cb->nlh->nlmsg_seq, 0, 6121 flags, &filters, 6122 &ctx->idxattr, &ctx->prividx, 6123 extack); 6124 /* If we ran out of room on the first message, 6125 * we're in trouble. 
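* (-EMSGSIZE with an empty skb would mean a single device's stats can
* never fit in one message.)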
6126 */ 6127 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 6128 6129 if (err < 0) 6130 break; 6131 ctx->prividx = 0; 6132 ctx->idxattr = 0; 6133 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 6134 } 6135 6136 return err; 6137 } 6138 6139 void rtnl_offload_xstats_notify(struct net_device *dev) 6140 { 6141 struct rtnl_stats_dump_filters response_filters = {}; 6142 struct net *net = dev_net(dev); 6143 int idxattr = 0, prividx = 0; 6144 struct sk_buff *skb; 6145 int err = -ENOBUFS; 6146 6147 ASSERT_RTNL(); 6148 6149 response_filters.mask[0] |= 6150 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6151 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6152 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6153 6154 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 6155 GFP_KERNEL); 6156 if (!skb) 6157 goto errout; 6158 6159 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 6160 &response_filters, &idxattr, &prividx, NULL); 6161 if (err < 0) { 6162 kfree_skb(skb); 6163 goto errout; 6164 } 6165 6166 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 6167 return; 6168 6169 errout: 6170 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 6171 } 6172 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 6173 6174 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 6175 struct netlink_ext_ack *extack) 6176 { 6177 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 6178 struct rtnl_stats_dump_filters response_filters = {}; 6179 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 6180 struct net *net = sock_net(skb->sk); 6181 struct net_device *dev = NULL; 6182 struct if_stats_msg *ifsm; 6183 bool notify = false; 6184 int err; 6185 6186 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 6187 false, extack); 6188 if (err) 6189 return err; 6190 6191 ifsm = nlmsg_data(nlh); 6192 if (ifsm->family != AF_UNSPEC) { 6193 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 6194 return -EINVAL; 6195 } 6196 6197 if (ifsm->ifindex > 0) 6198 dev = __dev_get_by_index(net, ifsm->ifindex); 6199 else 6200 return -EINVAL; 6201 6202 if (!dev) 6203 return -ENODEV; 6204 6205 if (ifsm->filter_mask) { 6206 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6207 return -EINVAL; 6208 } 6209 6210 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6211 ifla_stats_set_policy, extack); 6212 if (err < 0) 6213 return err; 6214 6215 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6216 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6217 6218 if (req) 6219 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6220 else 6221 err = netdev_offload_xstats_disable(dev, t_l3); 6222 6223 if (!err) 6224 notify = true; 6225 else if (err != -EALREADY) 6226 return err; 6227 6228 response_filters.mask[0] |= 6229 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6230 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6231 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6232 } 6233 6234 if (notify) 6235 rtnl_offload_xstats_notify(dev); 6236 6237 return 0; 6238 } 6239 6240 static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh, 6241 struct netlink_ext_ack *extack) 6242 { 6243 struct br_port_msg *bpm; 6244 6245 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) { 6246 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request"); 6247 return -EINVAL; 6248 } 6249 6250 bpm = nlmsg_data(nlh); 6251 if (bpm->ifindex) { 6252 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump 
request"); 6253 return -EINVAL; 6254 } 6255 if (nlmsg_attrlen(nlh, sizeof(*bpm))) { 6256 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request"); 6257 return -EINVAL; 6258 } 6259 6260 return 0; 6261 } 6262 6263 struct rtnl_mdb_dump_ctx { 6264 long idx; 6265 }; 6266 6267 static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 6268 { 6269 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx; 6270 struct net *net = sock_net(skb->sk); 6271 struct net_device *dev; 6272 int idx, s_idx; 6273 int err; 6274 6275 NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx); 6276 6277 if (cb->strict_check) { 6278 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack); 6279 if (err) 6280 return err; 6281 } 6282 6283 s_idx = ctx->idx; 6284 idx = 0; 6285 6286 for_each_netdev(net, dev) { 6287 if (idx < s_idx) 6288 goto skip; 6289 if (!dev->netdev_ops->ndo_mdb_dump) 6290 goto skip; 6291 6292 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb); 6293 if (err == -EMSGSIZE) 6294 goto out; 6295 /* Moving on to next device, reset markers and sequence 6296 * counters since they are all maintained per-device. 6297 */ 6298 memset(cb->ctx, 0, sizeof(cb->ctx)); 6299 cb->prev_seq = 0; 6300 cb->seq = 0; 6301 skip: 6302 idx++; 6303 } 6304 6305 out: 6306 ctx->idx = idx; 6307 return skb->len; 6308 } 6309 6310 static int rtnl_validate_mdb_entry_get(const struct nlattr *attr, 6311 struct netlink_ext_ack *extack) 6312 { 6313 struct br_mdb_entry *entry = nla_data(attr); 6314 6315 if (nla_len(attr) != sizeof(struct br_mdb_entry)) { 6316 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length"); 6317 return -EINVAL; 6318 } 6319 6320 if (entry->ifindex) { 6321 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified"); 6322 return -EINVAL; 6323 } 6324 6325 if (entry->state) { 6326 NL_SET_ERR_MSG(extack, "Entry state cannot be specified"); 6327 return -EINVAL; 6328 } 6329 6330 if (entry->flags) { 6331 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified"); 6332 return -EINVAL; 6333 } 6334 6335 if (entry->vid >= VLAN_VID_MASK) { 6336 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id"); 6337 return -EINVAL; 6338 } 6339 6340 if (entry->addr.proto != htons(ETH_P_IP) && 6341 entry->addr.proto != htons(ETH_P_IPV6) && 6342 entry->addr.proto != 0) { 6343 NL_SET_ERR_MSG(extack, "Unknown entry protocol"); 6344 return -EINVAL; 6345 } 6346 6347 return 0; 6348 } 6349 6350 static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = { 6351 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 6352 rtnl_validate_mdb_entry_get, 6353 sizeof(struct br_mdb_entry)), 6354 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED }, 6355 }; 6356 6357 static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 6358 struct netlink_ext_ack *extack) 6359 { 6360 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1]; 6361 struct net *net = sock_net(in_skb->sk); 6362 struct br_port_msg *bpm; 6363 struct net_device *dev; 6364 int err; 6365 6366 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb, 6367 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack); 6368 if (err) 6369 return err; 6370 6371 bpm = nlmsg_data(nlh); 6372 if (!bpm->ifindex) { 6373 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 6374 return -EINVAL; 6375 } 6376 6377 dev = __dev_get_by_index(net, bpm->ifindex); 6378 if (!dev) { 6379 NL_SET_ERR_MSG(extack, "Device doesn't exist"); 6380 return -ENODEV; 6381 } 6382 6383 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) { 6384 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute"); 6385 return -EINVAL; 6386 } 6387 6388 if 
static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
	struct net *net = sock_net(in_skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_get) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, extack);
}

static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}
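
/* Example: the shape of an RTM_NEWMDB request adding a permanent IPv4
 * group on a bridge port (an illustrative sketch of the userspace
 * message, not kernel code; the ifindex values are placeholders):
 *
 *	header:    struct br_port_msg with ifindex set to the bridge
 *	attribute: MDBA_SET_ENTRY carrying a struct br_mdb_entry with
 *	           .ifindex = port ifindex, .state = MDB_PERMANENT,
 *	           .addr.proto = htons(ETH_P_IP) and a multicast
 *	           .addr.u.ip4
 *
 * rtnl_validate_mdb_entry() enforces these constraints before the
 * request ever reaches ndo_mdb_add().
 */
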
static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
					    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);
	struct br_mdb_entry zero_entry = {};

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_N_VID - 1) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}
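
/* With NLM_F_BULK set, RTM_DELMDB is parsed against mdba_del_bulk_policy
 * instead of mdba_policy: rtnl_validate_mdb_entry_del_bulk() requires the
 * group address to stay zeroed, so the remaining fields (state, flags,
 * vid) describe which entries to remove rather than naming one specific
 * group. The matching semantics live in the driver's ndo_mdb_del_bulk().
 */
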
/* Process one rtnetlink message. */

static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
	rtnl_dumpit_func dumpit = cb->data;
	int err;

	/* The previous iteration has already finished; avoid calling
	 * ->dumpit() again, as it may not expect to be called after it
	 * reached the end.
	 */
	if (!dumpit)
		return 0;

	if (needs_lock)
		rtnl_lock();
	err = dumpit(skb, cb);
	if (needs_lock)
		rtnl_unlock();

	/* Old dump handlers used to send NLM_DONE in a separate recvmsg().
	 * Some applications which parse netlink manually depend on this.
	 */
	if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
		if (err < 0 && err != -EMSGSIZE)
			return err;
		if (!err)
			cb->data = NULL;

		return skb->len;
	}
	return err;
}

static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				struct netlink_dump_control *control)
{
	/* Interpose rtnl_dumpit() when the dump needs the RTNL held or a
	 * split NLM_DONE: the real dumpit callback is stashed in
	 * control->data and invoked from rtnl_dumpit() above.
	 */
	if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
	    !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
		WARN_ON(control->data);
		control->data = control->dump;
		control->dump = rtnl_dumpit;
	}

	return netlink_dump_start(ssk, skb, nlh, control);
}
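
/* Top-level doit/dumpit dispatcher: resolves the (family, msgtype) pair
 * to a struct rtnl_link under RCU, falling back to PF_UNSPEC handlers,
 * and enforces CAP_NET_ADMIN for everything but RTNL_KIND_GET requests.
 */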
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All messages must carry at least 1 byte of payload (struct rtgenmsg) */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;
		flags = link->flags;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
				.flags = flags,
			};
			err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * the module if the dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
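
/* Register the built-in PF_UNSPEC and PF_BRIDGE message handlers.
 * Handlers for other address families are added at runtime by their
 * respective subsystems and modules through rtnl_register().
 */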
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}