// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

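/* Note that rtnl_kfree_skbs() only queues the skbs: the actual
 * kfree_skb() calls happen in __rtnl_unlock() below, after rtnl_mutex
 * has been dropped. This keeps potentially long skb free lists (e.g.
 * from a qdisc reset) out of the RTNL critical section. The deferred
 * list is only ever touched with rtnl_mutex held, so it needs no
 * locking of its own.
 */
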
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *     while (!list_empty(...))
	 *       rtnl_lock()
	 *       wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

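/* Usage sketch (illustrative, not part of this file): a module that
 * serves an rtnetlink message type registers a doit/dumpit pair from
 * its init path and tears it down on exit, much as net/mpls/af_mpls.c
 * does for PF_MPLS. The foo_* names below are hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_MPLS,
 *					    RTM_NEWROUTE, foo_doit,
 *					    NULL, 0);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_unregister(PF_MPLS, RTM_NEWROUTE);
 *	}
 */
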
/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

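/* Usage sketch (illustrative): a virtual device driver describes its
 * link type with a struct rtnl_link_ops and registers it once at
 * module init. The foo_* callbacks and struct foo_priv below are
 * hypothetical:
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.priv_size	= sizeof(struct foo_priv),
 *		.setup		= foo_setup,
 *		.newlink	= foo_newlink,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 *
 * With .setup filled in and no explicit .dellink, __rtnl_link_register()
 * above wires up unregister_netdevice_queue() as the default dellink.
 */
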
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

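/* Usage sketch (illustrative): an address family hooks into the
 * IFLA_AF_SPEC dump with a struct rtnl_af_ops, the way ipv4 and ipv6
 * do. The foo_* callback names are hypothetical:
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */
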
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

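/* The nesting produced by rtnl_link_fill() above is:
 *
 *	IFLA_LINKINFO
 *	  IFLA_INFO_KIND		(dev's own ops)
 *	  [IFLA_INFO_DATA]		(ops->fill_info)
 *	  [IFLA_INFO_SLAVE_KIND]	(master dev's ops)
 *	  [IFLA_INFO_SLAVE_DATA]	(ops->fill_slave_info)
 *
 * which mirrors what ifla_info_policy further down accepts on input.
 */
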
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

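/* Note on the encoding above: metrics[] is indexed by RTAX_* - 1, so
 * the attribute type emitted for slot i is i + 1, i.e. the RTAX_*
 * value itself. RTAX_CC_ALGO is dumped as a string (the congestion
 * control algorithm name) and RTAX_FEATURES is masked down to the bits
 * user space is allowed to see; everything else goes out as a plain u32.
 */
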
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id =  id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0);	/* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

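/* All of the *_size() helpers above exist so that if_nlmsg_size() below
 * can compute a conservative upper bound for one link message before it
 * is built, making the skb allocated for a notification or a GETLINK
 * reply large enough up front. Any new attribute emitted by
 * rtnl_fill_ifinfo() must be accounted for here as well, or fills start
 * failing with -EMSGSIZE.
 */
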
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

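/* Both IFLA_STATS64 and the legacy 32-bit IFLA_STATS are filled from a
 * single dev_get_stats() call above: the 64-bit attribute is reserved
 * first and filled in place, then copy_rtnl_link_stats() truncates the
 * same counters into the old 32-bit layout for older user space.
 */
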
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

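/* rtnl_xdp_fill() below calls rtnl_xdp_report_one() once per attach
 * mode (skb/generic, driver, hardware). Each attached program gets its
 * mode-specific IFLA_XDP_<mode>_PROG_ID attribute; if exactly one
 * program is attached, the legacy IFLA_XDP_PROG_ID is emitted too,
 * otherwise IFLA_XDP_ATTACHED reports XDP_ATTACHED_MULTI.
 */
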
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * The fill_link_af() callback may return -ENODATA to
		 * indicate that there was no data to be dumped. This is
		 * not an error, it means we should trim the attribute
		 * header and continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

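/* Usage sketch (illustrative): handlers validate incoming messages
 * against these tables with the usual parse helpers, as
 * rtnl_valid_dump_ifinfo_req() does further down:
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
 *				     IFLA_MAX, ifla_policy, extack);
 *
 * Attributes that fail the policy (wrong type, over-long string, or an
 * NLA_REJECT entry) are refused before any doit/dumpit code runs.
 */
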
= { .type = NLA_NESTED }, 1985 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, 1986 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, 1987 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) }, 1988 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) }, 1989 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) }, 1990 [IFLA_VF_STATS] = { .type = NLA_NESTED }, 1991 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) }, 1992 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1993 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) }, 1994 }; 1995 1996 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1997 [IFLA_PORT_VF] = { .type = NLA_U32 }, 1998 [IFLA_PORT_PROFILE] = { .type = NLA_STRING, 1999 .len = PORT_PROFILE_MAX }, 2000 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, 2001 .len = PORT_UUID_MAX }, 2002 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, 2003 .len = PORT_UUID_MAX }, 2004 [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, 2005 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, 2006 2007 /* Unused, but we need to keep it here since user space could 2008 * fill it. It's also broken with regard to NLA_BINARY use in 2009 * combination with structs. 2010 */ 2011 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, 2012 .len = sizeof(struct ifla_port_vsi) }, 2013 }; 2014 2015 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { 2016 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD }, 2017 [IFLA_XDP_FD] = { .type = NLA_S32 }, 2018 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 }, 2019 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 }, 2020 [IFLA_XDP_FLAGS] = { .type = NLA_U32 }, 2021 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 }, 2022 }; 2023 2024 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla) 2025 { 2026 const struct rtnl_link_ops *ops = NULL; 2027 struct nlattr *linfo[IFLA_INFO_MAX + 1]; 2028 2029 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0) 2030 return NULL; 2031 2032 if (linfo[IFLA_INFO_KIND]) { 2033 char kind[MODULE_NAME_LEN]; 2034 2035 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind)); 2036 ops = rtnl_link_ops_get(kind); 2037 } 2038 2039 return ops; 2040 } 2041 2042 static bool link_master_filtered(struct net_device *dev, int master_idx) 2043 { 2044 struct net_device *master; 2045 2046 if (!master_idx) 2047 return false; 2048 2049 master = netdev_master_upper_dev_get(dev); 2050 2051 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need 2052 * another invalid value for ifindex to denote "no master". 2053 */ 2054 if (master_idx == -1) 2055 return !!master; 2056 2057 if (!master || master->ifindex != master_idx) 2058 return true; 2059 2060 return false; 2061 } 2062 2063 static bool link_kind_filtered(const struct net_device *dev, 2064 const struct rtnl_link_ops *kind_ops) 2065 { 2066 if (kind_ops && dev->rtnl_link_ops != kind_ops) 2067 return true; 2068 2069 return false; 2070 } 2071 2072 static bool link_dump_filtered(struct net_device *dev, 2073 int master_idx, 2074 const struct rtnl_link_ops *kind_ops) 2075 { 2076 if (link_master_filtered(dev, master_idx) || 2077 link_kind_filtered(dev, kind_ops)) 2078 return true; 2079 2080 return false; 2081 } 2082 2083 /** 2084 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged. 
2085 * @sk: netlink socket 2086 * @netnsid: network namespace identifier 2087 * 2088 * Returns the network namespace identified by netnsid on success or an error 2089 * pointer on failure. 2090 */ 2091 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid) 2092 { 2093 struct net *net; 2094 2095 net = get_net_ns_by_id(sock_net(sk), netnsid); 2096 if (!net) 2097 return ERR_PTR(-EINVAL); 2098 2099 /* For now, the caller is required to have CAP_NET_ADMIN in 2100 * the user namespace owning the target net ns. 2101 */ 2102 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { 2103 put_net(net); 2104 return ERR_PTR(-EACCES); 2105 } 2106 return net; 2107 } 2108 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable); 2109 2110 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh, 2111 bool strict_check, struct nlattr **tb, 2112 struct netlink_ext_ack *extack) 2113 { 2114 int hdrlen; 2115 2116 if (strict_check) { 2117 struct ifinfomsg *ifm; 2118 2119 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2120 NL_SET_ERR_MSG(extack, "Invalid header for link dump"); 2121 return -EINVAL; 2122 } 2123 2124 ifm = nlmsg_data(nlh); 2125 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2126 ifm->ifi_change) { 2127 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request"); 2128 return -EINVAL; 2129 } 2130 if (ifm->ifi_index) { 2131 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps"); 2132 return -EINVAL; 2133 } 2134 2135 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, 2136 IFLA_MAX, ifla_policy, 2137 extack); 2138 } 2139 2140 /* A hack to preserve kernel<->userspace interface. 2141 * The correct header is ifinfomsg. It is consistent with rtnl_getlink. 2142 * However, before Linux v3.9 the code here assumed rtgenmsg and that's 2143 * what iproute2 < v3.9.0 used. 2144 * We can detect the old iproute2. Even including the IFLA_EXT_MASK 2145 * attribute, its netlink message is shorter than struct ifinfomsg. 2146 */ 2147 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 
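/* Editor's note, worked size check for the legacy-header detection here:
 * struct ifinfomsg is 16 bytes (family, pad, type, index, flags, change)
 * while struct rtgenmsg is a single byte. An old-style dump request that
 * carries rtgenmsg plus an IFLA_EXT_MASK attribute has a payload of
 * NLMSG_ALIGN(1) + nla_total_size(4) = 4 + 8 = 12 bytes, still smaller
 * than 16, so the comparison reliably spots the pre-v3.9 layout.
 */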
2148 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 2149 2150 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, 2151 extack); 2152 } 2153 2154 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 2155 { 2156 struct netlink_ext_ack *extack = cb->extack; 2157 const struct nlmsghdr *nlh = cb->nlh; 2158 struct net *net = sock_net(skb->sk); 2159 struct net *tgt_net = net; 2160 int h, s_h; 2161 int idx = 0, s_idx; 2162 struct net_device *dev; 2163 struct hlist_head *head; 2164 struct nlattr *tb[IFLA_MAX+1]; 2165 u32 ext_filter_mask = 0; 2166 const struct rtnl_link_ops *kind_ops = NULL; 2167 unsigned int flags = NLM_F_MULTI; 2168 int master_idx = 0; 2169 int netnsid = -1; 2170 int err, i; 2171 2172 s_h = cb->args[0]; 2173 s_idx = cb->args[1]; 2174 2175 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack); 2176 if (err < 0) { 2177 if (cb->strict_check) 2178 return err; 2179 2180 goto walk_entries; 2181 } 2182 2183 for (i = 0; i <= IFLA_MAX; ++i) { 2184 if (!tb[i]) 2185 continue; 2186 2187 /* new attributes should only be added with strict checking */ 2188 switch (i) { 2189 case IFLA_TARGET_NETNSID: 2190 netnsid = nla_get_s32(tb[i]); 2191 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); 2192 if (IS_ERR(tgt_net)) { 2193 NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); 2194 return PTR_ERR(tgt_net); 2195 } 2196 break; 2197 case IFLA_EXT_MASK: 2198 ext_filter_mask = nla_get_u32(tb[i]); 2199 break; 2200 case IFLA_MASTER: 2201 master_idx = nla_get_u32(tb[i]); 2202 break; 2203 case IFLA_LINKINFO: 2204 kind_ops = linkinfo_to_kind_ops(tb[i]); 2205 break; 2206 default: 2207 if (cb->strict_check) { 2208 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request"); 2209 return -EINVAL; 2210 } 2211 } 2212 } 2213 2214 if (master_idx || kind_ops) 2215 flags |= NLM_F_DUMP_FILTERED; 2216 2217 walk_entries: 2218 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 2219 idx = 0; 2220 head = &tgt_net->dev_index_head[h]; 2221 hlist_for_each_entry(dev, head, index_hlist) { 2222 if (link_dump_filtered(dev, master_idx, kind_ops)) 2223 goto cont; 2224 if (idx < s_idx) 2225 goto cont; 2226 err = rtnl_fill_ifinfo(skb, dev, net, 2227 RTM_NEWLINK, 2228 NETLINK_CB(cb->skb).portid, 2229 nlh->nlmsg_seq, 0, flags, 2230 ext_filter_mask, 0, NULL, 0, 2231 netnsid, GFP_KERNEL); 2232 2233 if (err < 0) { 2234 if (likely(skb->len)) 2235 goto out; 2236 2237 goto out_err; 2238 } 2239 cont: 2240 idx++; 2241 } 2242 } 2243 out: 2244 err = skb->len; 2245 out_err: 2246 cb->args[1] = idx; 2247 cb->args[0] = h; 2248 cb->seq = tgt_net->dev_base_seq; 2249 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2250 if (netnsid >= 0) 2251 put_net(tgt_net); 2252 2253 return err; 2254 } 2255 2256 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2257 struct netlink_ext_ack *exterr) 2258 { 2259 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2260 exterr); 2261 } 2262 EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2263 2264 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2265 { 2266 struct net *net; 2267 /* Examine the link attributes and figure out which 2268 * network namespace we are talking about. 
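 *
 * Editor's note: IFLA_NET_NS_PID takes precedence over IFLA_NET_NS_FD
 * here, and with neither attribute present the source namespace itself
 * is pinned via get_net(). On success the caller always receives a
 * namespace reference it must drop with put_net().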
2269 */ 2270 if (tb[IFLA_NET_NS_PID]) 2271 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 2272 else if (tb[IFLA_NET_NS_FD]) 2273 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); 2274 else 2275 net = get_net(src_net); 2276 return net; 2277 } 2278 EXPORT_SYMBOL(rtnl_link_get_net); 2279 2280 /* Figure out which network namespace we are talking about by 2281 * examining the link attributes in the following order: 2282 * 2283 * 1. IFLA_NET_NS_PID 2284 * 2. IFLA_NET_NS_FD 2285 * 3. IFLA_TARGET_NETNSID 2286 */ 2287 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net, 2288 struct nlattr *tb[]) 2289 { 2290 struct net *net; 2291 2292 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) 2293 return rtnl_link_get_net(src_net, tb); 2294 2295 if (!tb[IFLA_TARGET_NETNSID]) 2296 return get_net(src_net); 2297 2298 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID])); 2299 if (!net) 2300 return ERR_PTR(-EINVAL); 2301 2302 return net; 2303 } 2304 2305 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb, 2306 struct net *src_net, 2307 struct nlattr *tb[], int cap) 2308 { 2309 struct net *net; 2310 2311 net = rtnl_link_get_net_by_nlattr(src_net, tb); 2312 if (IS_ERR(net)) 2313 return net; 2314 2315 if (!netlink_ns_capable(skb, net->user_ns, cap)) { 2316 put_net(net); 2317 return ERR_PTR(-EPERM); 2318 } 2319 2320 return net; 2321 } 2322 2323 /* Verify that rtnetlink requests do not pass additional properties 2324 * potentially referring to different network namespaces. 2325 */ 2326 static int rtnl_ensure_unique_netns(struct nlattr *tb[], 2327 struct netlink_ext_ack *extack, 2328 bool netns_id_only) 2329 { 2330 2331 if (netns_id_only) { 2332 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD]) 2333 return 0; 2334 2335 NL_SET_ERR_MSG(extack, "specified netns attribute not supported"); 2336 return -EOPNOTSUPP; 2337 } 2338 2339 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])) 2340 goto invalid_attr; 2341 2342 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD])) 2343 goto invalid_attr; 2344 2345 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID])) 2346 goto invalid_attr; 2347 2348 return 0; 2349 2350 invalid_attr: 2351 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified"); 2352 return -EINVAL; 2353 } 2354 2355 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2356 int max_tx_rate) 2357 { 2358 const struct net_device_ops *ops = dev->netdev_ops; 2359 2360 if (!ops->ndo_set_vf_rate) 2361 return -EOPNOTSUPP; 2362 if (max_tx_rate && max_tx_rate < min_tx_rate) 2363 return -EINVAL; 2364 2365 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate); 2366 } 2367 2368 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], 2369 struct netlink_ext_ack *extack) 2370 { 2371 if (dev) { 2372 if (tb[IFLA_ADDRESS] && 2373 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) 2374 return -EINVAL; 2375 2376 if (tb[IFLA_BROADCAST] && 2377 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 2378 return -EINVAL; 2379 } 2380 2381 if (tb[IFLA_AF_SPEC]) { 2382 struct nlattr *af; 2383 int rem, err; 2384 2385 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2386 const struct rtnl_af_ops *af_ops; 2387 2388 af_ops = rtnl_af_lookup(nla_type(af)); 2389 if (!af_ops) 2390 return -EAFNOSUPPORT; 2391 2392 if (!af_ops->set_link_af) 2393 return -EOPNOTSUPP; 2394 2395 if (af_ops->validate_link_af) { 2396 err = af_ops->validate_link_af(dev, af, extack); 2397 if (err < 0) 
2398 return err; 2399 } 2400 } 2401 } 2402 2403 return 0; 2404 } 2405 2406 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt, 2407 int guid_type) 2408 { 2409 const struct net_device_ops *ops = dev->netdev_ops; 2410 2411 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type); 2412 } 2413 2414 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type) 2415 { 2416 if (dev->type != ARPHRD_INFINIBAND) 2417 return -EOPNOTSUPP; 2418 2419 return handle_infiniband_guid(dev, ivt, guid_type); 2420 } 2421 2422 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) 2423 { 2424 const struct net_device_ops *ops = dev->netdev_ops; 2425 int err = -EINVAL; 2426 2427 if (tb[IFLA_VF_MAC]) { 2428 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); 2429 2430 if (ivm->vf >= INT_MAX) 2431 return -EINVAL; 2432 err = -EOPNOTSUPP; 2433 if (ops->ndo_set_vf_mac) 2434 err = ops->ndo_set_vf_mac(dev, ivm->vf, 2435 ivm->mac); 2436 if (err < 0) 2437 return err; 2438 } 2439 2440 if (tb[IFLA_VF_VLAN]) { 2441 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); 2442 2443 if (ivv->vf >= INT_MAX) 2444 return -EINVAL; 2445 err = -EOPNOTSUPP; 2446 if (ops->ndo_set_vf_vlan) 2447 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, 2448 ivv->qos, 2449 htons(ETH_P_8021Q)); 2450 if (err < 0) 2451 return err; 2452 } 2453 2454 if (tb[IFLA_VF_VLAN_LIST]) { 2455 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN]; 2456 struct nlattr *attr; 2457 int rem, len = 0; 2458 2459 err = -EOPNOTSUPP; 2460 if (!ops->ndo_set_vf_vlan) 2461 return err; 2462 2463 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { 2464 if (nla_type(attr) != IFLA_VF_VLAN_INFO || 2465 nla_len(attr) < NLA_HDRLEN) { 2466 return -EINVAL; 2467 } 2468 if (len >= MAX_VLAN_LIST_LEN) 2469 return -EOPNOTSUPP; 2470 ivvl[len] = nla_data(attr); 2471 2472 len++; 2473 } 2474 if (len == 0) 2475 return -EINVAL; 2476 2477 if (ivvl[0]->vf >= INT_MAX) 2478 return -EINVAL; 2479 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, 2480 ivvl[0]->qos, ivvl[0]->vlan_proto); 2481 if (err < 0) 2482 return err; 2483 } 2484 2485 if (tb[IFLA_VF_TX_RATE]) { 2486 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); 2487 struct ifla_vf_info ivf; 2488 2489 if (ivt->vf >= INT_MAX) 2490 return -EINVAL; 2491 err = -EOPNOTSUPP; 2492 if (ops->ndo_get_vf_config) 2493 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); 2494 if (err < 0) 2495 return err; 2496 2497 err = rtnl_set_vf_rate(dev, ivt->vf, 2498 ivf.min_tx_rate, ivt->rate); 2499 if (err < 0) 2500 return err; 2501 } 2502 2503 if (tb[IFLA_VF_RATE]) { 2504 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); 2505 2506 if (ivt->vf >= INT_MAX) 2507 return -EINVAL; 2508 2509 err = rtnl_set_vf_rate(dev, ivt->vf, 2510 ivt->min_tx_rate, ivt->max_tx_rate); 2511 if (err < 0) 2512 return err; 2513 } 2514 2515 if (tb[IFLA_VF_SPOOFCHK]) { 2516 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); 2517 2518 if (ivs->vf >= INT_MAX) 2519 return -EINVAL; 2520 err = -EOPNOTSUPP; 2521 if (ops->ndo_set_vf_spoofchk) 2522 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, 2523 ivs->setting); 2524 if (err < 0) 2525 return err; 2526 } 2527 2528 if (tb[IFLA_VF_LINK_STATE]) { 2529 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); 2530 2531 if (ivl->vf >= INT_MAX) 2532 return -EINVAL; 2533 err = -EOPNOTSUPP; 2534 if (ops->ndo_set_vf_link_state) 2535 err = ops->ndo_set_vf_link_state(dev, ivl->vf, 2536 ivl->link_state); 2537 if (err < 0) 2538 return err; 
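/* Editor's note on the pattern repeated for every attribute in
 * do_setvfinfo(): err is primed with -EOPNOTSUPP, overwritten only when
 * the driver implements the matching ndo_set_vf_* hook, and any negative
 * result aborts the walk at once, so one unsupported attribute fails the
 * whole request rather than being silently skipped.
 */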
2539 } 2540 2541 if (tb[IFLA_VF_RSS_QUERY_EN]) { 2542 struct ifla_vf_rss_query_en *ivrssq_en; 2543 2544 err = -EOPNOTSUPP; 2545 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); 2546 if (ivrssq_en->vf >= INT_MAX) 2547 return -EINVAL; 2548 if (ops->ndo_set_vf_rss_query_en) 2549 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, 2550 ivrssq_en->setting); 2551 if (err < 0) 2552 return err; 2553 } 2554 2555 if (tb[IFLA_VF_TRUST]) { 2556 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); 2557 2558 if (ivt->vf >= INT_MAX) 2559 return -EINVAL; 2560 err = -EOPNOTSUPP; 2561 if (ops->ndo_set_vf_trust) 2562 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); 2563 if (err < 0) 2564 return err; 2565 } 2566 2567 if (tb[IFLA_VF_IB_NODE_GUID]) { 2568 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); 2569 2570 if (ivt->vf >= INT_MAX) 2571 return -EINVAL; 2572 if (!ops->ndo_set_vf_guid) 2573 return -EOPNOTSUPP; 2574 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); 2575 } 2576 2577 if (tb[IFLA_VF_IB_PORT_GUID]) { 2578 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); 2579 2580 if (ivt->vf >= INT_MAX) 2581 return -EINVAL; 2582 if (!ops->ndo_set_vf_guid) 2583 return -EOPNOTSUPP; 2584 2585 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID); 2586 } 2587 2588 return err; 2589 } 2590 2591 static int do_set_master(struct net_device *dev, int ifindex, 2592 struct netlink_ext_ack *extack) 2593 { 2594 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 2595 const struct net_device_ops *ops; 2596 int err; 2597 2598 if (upper_dev) { 2599 if (upper_dev->ifindex == ifindex) 2600 return 0; 2601 ops = upper_dev->netdev_ops; 2602 if (ops->ndo_del_slave) { 2603 err = ops->ndo_del_slave(upper_dev, dev); 2604 if (err) 2605 return err; 2606 } else { 2607 return -EOPNOTSUPP; 2608 } 2609 } 2610 2611 if (ifindex) { 2612 upper_dev = __dev_get_by_index(dev_net(dev), ifindex); 2613 if (!upper_dev) 2614 return -EINVAL; 2615 ops = upper_dev->netdev_ops; 2616 if (ops->ndo_add_slave) { 2617 err = ops->ndo_add_slave(upper_dev, dev, extack); 2618 if (err) 2619 return err; 2620 } else { 2621 return -EOPNOTSUPP; 2622 } 2623 } 2624 return 0; 2625 } 2626 2627 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { 2628 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, 2629 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, 2630 }; 2631 2632 static int do_set_proto_down(struct net_device *dev, 2633 struct nlattr *nl_proto_down, 2634 struct nlattr *nl_proto_down_reason, 2635 struct netlink_ext_ack *extack) 2636 { 2637 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; 2638 unsigned long mask = 0; 2639 u32 value; 2640 bool proto_down; 2641 int err; 2642 2643 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { 2644 NL_SET_ERR_MSG(extack, "Protodown not supported by device"); 2645 return -EOPNOTSUPP; 2646 } 2647 2648 if (nl_proto_down_reason) { 2649 err = nla_parse_nested_deprecated(pdreason, 2650 IFLA_PROTO_DOWN_REASON_MAX, 2651 nl_proto_down_reason, 2652 ifla_proto_down_reason_policy, 2653 NULL); 2654 if (err < 0) 2655 return err; 2656 2657 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { 2658 NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); 2659 return -EINVAL; 2660 } 2661 2662 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); 2663 2664 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) 2665 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); 2666 2667 dev_change_proto_down_reason(dev, mask, value); 2668 } 2669 2670 
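/* Editor's note, a worked example of the mask/value pair applied above:
 * with dev->proto_down_reason == 0x5 (reasons 0 and 2 active), a request
 * carrying IFLA_PROTO_DOWN_REASON_MASK == 0x1 and _VALUE == 0x0 clears
 * bit 0 only, leaving 0x4. With the mask attribute absent (mask == 0),
 * dev_change_proto_down_reason() replaces the whole reason word with the
 * value instead.
 */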
if (nl_proto_down) { 2671 proto_down = nla_get_u8(nl_proto_down); 2672 2673 /* Don't turn off protodown if there are active reasons */ 2674 if (!proto_down && dev->proto_down_reason) { 2675 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); 2676 return -EBUSY; 2677 } 2678 err = dev_change_proto_down(dev, 2679 proto_down); 2680 if (err) 2681 return err; 2682 } 2683 2684 return 0; 2685 } 2686 2687 #define DO_SETLINK_MODIFIED 0x01 2688 /* notify flag means notify + modified. */ 2689 #define DO_SETLINK_NOTIFY 0x03 2690 static int do_setlink(const struct sk_buff *skb, 2691 struct net_device *dev, struct ifinfomsg *ifm, 2692 struct netlink_ext_ack *extack, 2693 struct nlattr **tb, int status) 2694 { 2695 const struct net_device_ops *ops = dev->netdev_ops; 2696 char ifname[IFNAMSIZ]; 2697 int err; 2698 2699 err = validate_linkmsg(dev, tb, extack); 2700 if (err < 0) 2701 return err; 2702 2703 if (tb[IFLA_IFNAME]) 2704 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 2705 else 2706 ifname[0] = '\0'; 2707 2708 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { 2709 const char *pat = ifname[0] ? ifname : NULL; 2710 struct net *net; 2711 int new_ifindex; 2712 2713 net = rtnl_link_get_net_capable(skb, dev_net(dev), 2714 tb, CAP_NET_ADMIN); 2715 if (IS_ERR(net)) { 2716 err = PTR_ERR(net); 2717 goto errout; 2718 } 2719 2720 if (tb[IFLA_NEW_IFINDEX]) 2721 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]); 2722 else 2723 new_ifindex = 0; 2724 2725 err = __dev_change_net_namespace(dev, net, pat, new_ifindex); 2726 put_net(net); 2727 if (err) 2728 goto errout; 2729 status |= DO_SETLINK_MODIFIED; 2730 } 2731 2732 if (tb[IFLA_MAP]) { 2733 struct rtnl_link_ifmap *u_map; 2734 struct ifmap k_map; 2735 2736 if (!ops->ndo_set_config) { 2737 err = -EOPNOTSUPP; 2738 goto errout; 2739 } 2740 2741 if (!netif_device_present(dev)) { 2742 err = -ENODEV; 2743 goto errout; 2744 } 2745 2746 u_map = nla_data(tb[IFLA_MAP]); 2747 k_map.mem_start = (unsigned long) u_map->mem_start; 2748 k_map.mem_end = (unsigned long) u_map->mem_end; 2749 k_map.base_addr = (unsigned short) u_map->base_addr; 2750 k_map.irq = (unsigned char) u_map->irq; 2751 k_map.dma = (unsigned char) u_map->dma; 2752 k_map.port = (unsigned char) u_map->port; 2753 2754 err = ops->ndo_set_config(dev, &k_map); 2755 if (err < 0) 2756 goto errout; 2757 2758 status |= DO_SETLINK_NOTIFY; 2759 } 2760 2761 if (tb[IFLA_ADDRESS]) { 2762 struct sockaddr *sa; 2763 int len; 2764 2765 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, 2766 sizeof(*sa)); 2767 sa = kmalloc(len, GFP_KERNEL); 2768 if (!sa) { 2769 err = -ENOMEM; 2770 goto errout; 2771 } 2772 sa->sa_family = dev->type; 2773 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 2774 dev->addr_len); 2775 err = dev_set_mac_address_user(dev, sa, extack); 2776 kfree(sa); 2777 if (err) 2778 goto errout; 2779 status |= DO_SETLINK_MODIFIED; 2780 } 2781 2782 if (tb[IFLA_MTU]) { 2783 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); 2784 if (err < 0) 2785 goto errout; 2786 status |= DO_SETLINK_MODIFIED; 2787 } 2788 2789 if (tb[IFLA_GROUP]) { 2790 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 2791 status |= DO_SETLINK_NOTIFY; 2792 } 2793 2794 /* 2795 * Interface selected by interface index but interface 2796 * name provided implies that a name change has been 2797 * requested. 
2798 */ 2799 if (ifm->ifi_index > 0 && ifname[0]) { 2800 err = dev_change_name(dev, ifname); 2801 if (err < 0) 2802 goto errout; 2803 status |= DO_SETLINK_MODIFIED; 2804 } 2805 2806 if (tb[IFLA_IFALIAS]) { 2807 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), 2808 nla_len(tb[IFLA_IFALIAS])); 2809 if (err < 0) 2810 goto errout; 2811 status |= DO_SETLINK_NOTIFY; 2812 } 2813 2814 if (tb[IFLA_BROADCAST]) { 2815 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 2816 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 2817 } 2818 2819 if (tb[IFLA_MASTER]) { 2820 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 2821 if (err) 2822 goto errout; 2823 status |= DO_SETLINK_MODIFIED; 2824 } 2825 2826 if (ifm->ifi_flags || ifm->ifi_change) { 2827 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 2828 extack); 2829 if (err < 0) 2830 goto errout; 2831 } 2832 2833 if (tb[IFLA_CARRIER]) { 2834 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); 2835 if (err) 2836 goto errout; 2837 status |= DO_SETLINK_MODIFIED; 2838 } 2839 2840 if (tb[IFLA_TXQLEN]) { 2841 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); 2842 2843 err = dev_change_tx_queue_len(dev, value); 2844 if (err) 2845 goto errout; 2846 status |= DO_SETLINK_MODIFIED; 2847 } 2848 2849 if (tb[IFLA_GSO_MAX_SIZE]) { 2850 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); 2851 2852 if (max_size > dev->tso_max_size) { 2853 err = -EINVAL; 2854 goto errout; 2855 } 2856 2857 if (dev->gso_max_size ^ max_size) { 2858 netif_set_gso_max_size(dev, max_size); 2859 status |= DO_SETLINK_MODIFIED; 2860 } 2861 } 2862 2863 if (tb[IFLA_GSO_MAX_SEGS]) { 2864 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); 2865 2866 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) { 2867 err = -EINVAL; 2868 goto errout; 2869 } 2870 2871 if (dev->gso_max_segs ^ max_segs) { 2872 netif_set_gso_max_segs(dev, max_segs); 2873 status |= DO_SETLINK_MODIFIED; 2874 } 2875 } 2876 2877 if (tb[IFLA_GRO_MAX_SIZE]) { 2878 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); 2879 2880 if (dev->gro_max_size ^ gro_max_size) { 2881 netif_set_gro_max_size(dev, gro_max_size); 2882 status |= DO_SETLINK_MODIFIED; 2883 } 2884 } 2885 2886 if (tb[IFLA_OPERSTATE]) 2887 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 2888 2889 if (tb[IFLA_LINKMODE]) { 2890 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); 2891 2892 write_lock(&dev_base_lock); 2893 if (dev->link_mode ^ value) 2894 status |= DO_SETLINK_NOTIFY; 2895 dev->link_mode = value; 2896 write_unlock(&dev_base_lock); 2897 } 2898 2899 if (tb[IFLA_VFINFO_LIST]) { 2900 struct nlattr *vfinfo[IFLA_VF_MAX + 1]; 2901 struct nlattr *attr; 2902 int rem; 2903 2904 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 2905 if (nla_type(attr) != IFLA_VF_INFO || 2906 nla_len(attr) < NLA_HDRLEN) { 2907 err = -EINVAL; 2908 goto errout; 2909 } 2910 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX, 2911 attr, 2912 ifla_vf_policy, 2913 NULL); 2914 if (err < 0) 2915 goto errout; 2916 err = do_setvfinfo(dev, vfinfo); 2917 if (err < 0) 2918 goto errout; 2919 status |= DO_SETLINK_NOTIFY; 2920 } 2921 } 2922 err = 0; 2923 2924 if (tb[IFLA_VF_PORTS]) { 2925 struct nlattr *port[IFLA_PORT_MAX+1]; 2926 struct nlattr *attr; 2927 int vf; 2928 int rem; 2929 2930 err = -EOPNOTSUPP; 2931 if (!ops->ndo_set_vf_port) 2932 goto errout; 2933 2934 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 2935 if (nla_type(attr) != IFLA_VF_PORT || 2936 nla_len(attr) < NLA_HDRLEN) { 2937 err = -EINVAL; 2938 goto errout; 2939 } 
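/* Editor's note: as with IFLA_VFINFO_LIST earlier in this function, each
 * nested IFLA_VF_PORT container is re-parsed against ifla_port_policy
 * before the driver hook runs; the outer nlmsg parse only validated
 * tb[IFLA_VF_PORTS] itself as a nested attribute.
 */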
2940 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2941 attr, 2942 ifla_port_policy, 2943 NULL); 2944 if (err < 0) 2945 goto errout; 2946 if (!port[IFLA_PORT_VF]) { 2947 err = -EOPNOTSUPP; 2948 goto errout; 2949 } 2950 vf = nla_get_u32(port[IFLA_PORT_VF]); 2951 err = ops->ndo_set_vf_port(dev, vf, port); 2952 if (err < 0) 2953 goto errout; 2954 status |= DO_SETLINK_NOTIFY; 2955 } 2956 } 2957 err = 0; 2958 2959 if (tb[IFLA_PORT_SELF]) { 2960 struct nlattr *port[IFLA_PORT_MAX+1]; 2961 2962 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX, 2963 tb[IFLA_PORT_SELF], 2964 ifla_port_policy, NULL); 2965 if (err < 0) 2966 goto errout; 2967 2968 err = -EOPNOTSUPP; 2969 if (ops->ndo_set_vf_port) 2970 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); 2971 if (err < 0) 2972 goto errout; 2973 status |= DO_SETLINK_NOTIFY; 2974 } 2975 2976 if (tb[IFLA_AF_SPEC]) { 2977 struct nlattr *af; 2978 int rem; 2979 2980 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { 2981 const struct rtnl_af_ops *af_ops; 2982 2983 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af)))); 2984 2985 err = af_ops->set_link_af(dev, af, extack); 2986 if (err < 0) 2987 goto errout; 2988 2989 status |= DO_SETLINK_NOTIFY; 2990 } 2991 } 2992 err = 0; 2993 2994 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { 2995 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], 2996 tb[IFLA_PROTO_DOWN_REASON], extack); 2997 if (err) 2998 goto errout; 2999 status |= DO_SETLINK_NOTIFY; 3000 } 3001 3002 if (tb[IFLA_XDP]) { 3003 struct nlattr *xdp[IFLA_XDP_MAX + 1]; 3004 u32 xdp_flags = 0; 3005 3006 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, 3007 tb[IFLA_XDP], 3008 ifla_xdp_policy, NULL); 3009 if (err < 0) 3010 goto errout; 3011 3012 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) { 3013 err = -EINVAL; 3014 goto errout; 3015 } 3016 3017 if (xdp[IFLA_XDP_FLAGS]) { 3018 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]); 3019 if (xdp_flags & ~XDP_FLAGS_MASK) { 3020 err = -EINVAL; 3021 goto errout; 3022 } 3023 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) { 3024 err = -EINVAL; 3025 goto errout; 3026 } 3027 } 3028 3029 if (xdp[IFLA_XDP_FD]) { 3030 int expected_fd = -1; 3031 3032 if (xdp_flags & XDP_FLAGS_REPLACE) { 3033 if (!xdp[IFLA_XDP_EXPECTED_FD]) { 3034 err = -EINVAL; 3035 goto errout; 3036 } 3037 expected_fd = 3038 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]); 3039 } 3040 3041 err = dev_change_xdp_fd(dev, extack, 3042 nla_get_s32(xdp[IFLA_XDP_FD]), 3043 expected_fd, 3044 xdp_flags); 3045 if (err) 3046 goto errout; 3047 status |= DO_SETLINK_NOTIFY; 3048 } 3049 } 3050 3051 errout: 3052 if (status & DO_SETLINK_MODIFIED) { 3053 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) 3054 netdev_state_change(dev); 3055 3056 if (err < 0) 3057 net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", 3058 dev->name); 3059 } 3060 3061 return err; 3062 } 3063 3064 static struct net_device *rtnl_dev_get(struct net *net, 3065 struct nlattr *tb[]) 3066 { 3067 char ifname[ALTIFNAMSIZ]; 3068 3069 if (tb[IFLA_IFNAME]) 3070 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3071 else if (tb[IFLA_ALT_IFNAME]) 3072 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ); 3073 else 3074 return NULL; 3075 3076 return __dev_get_by_name(net, ifname); 3077 } 3078 3079 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3080 struct netlink_ext_ack *extack) 3081 { 3082 struct net *net = sock_net(skb->sk); 3083 struct ifinfomsg *ifm; 3084 struct net_device *dev; 3085 int err; 3086 struct nlattr *tb[IFLA_MAX+1]; 3087 3088 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3089 ifla_policy, extack); 3090 if (err < 0) 3091 goto errout; 3092 3093 err = rtnl_ensure_unique_netns(tb, extack, false); 3094 if (err < 0) 3095 goto errout; 3096 3097 err = -EINVAL; 3098 ifm = nlmsg_data(nlh); 3099 if (ifm->ifi_index > 0) 3100 dev = __dev_get_by_index(net, ifm->ifi_index); 3101 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3102 dev = rtnl_dev_get(net, tb); 3103 else 3104 goto errout; 3105 3106 if (dev == NULL) { 3107 err = -ENODEV; 3108 goto errout; 3109 } 3110 3111 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3112 errout: 3113 return err; 3114 } 3115 3116 static int rtnl_group_dellink(const struct net *net, int group) 3117 { 3118 struct net_device *dev, *aux; 3119 LIST_HEAD(list_kill); 3120 bool found = false; 3121 3122 if (!group) 3123 return -EPERM; 3124 3125 for_each_netdev(net, dev) { 3126 if (dev->group == group) { 3127 const struct rtnl_link_ops *ops; 3128 3129 found = true; 3130 ops = dev->rtnl_link_ops; 3131 if (!ops || !ops->dellink) 3132 return -EOPNOTSUPP; 3133 } 3134 } 3135 3136 if (!found) 3137 return -ENODEV; 3138 3139 for_each_netdev_safe(net, dev, aux) { 3140 if (dev->group == group) { 3141 const struct rtnl_link_ops *ops; 3142 3143 ops = dev->rtnl_link_ops; 3144 ops->dellink(dev, &list_kill); 3145 } 3146 } 3147 unregister_netdevice_many(&list_kill); 3148 3149 return 0; 3150 } 3151 3152 int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh) 3153 { 3154 const struct rtnl_link_ops *ops; 3155 LIST_HEAD(list_kill); 3156 3157 ops = dev->rtnl_link_ops; 3158 if (!ops || !ops->dellink) 3159 return -EOPNOTSUPP; 3160 3161 ops->dellink(dev, &list_kill); 3162 unregister_netdevice_many_notify(&list_kill, portid, nlh); 3163 3164 return 0; 3165 } 3166 EXPORT_SYMBOL_GPL(rtnl_delete_link); 3167 3168 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 3169 struct netlink_ext_ack *extack) 3170 { 3171 struct net *net = sock_net(skb->sk); 3172 u32 portid = NETLINK_CB(skb).portid; 3173 struct net *tgt_net = net; 3174 struct net_device *dev = NULL; 3175 struct ifinfomsg *ifm; 3176 struct nlattr *tb[IFLA_MAX+1]; 3177 int err; 3178 int netnsid = -1; 3179 3180 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3181 ifla_policy, extack); 3182 if (err < 0) 3183 return err; 3184 3185 err = rtnl_ensure_unique_netns(tb, extack, true); 3186 if (err < 0) 3187 return err; 3188 3189 if (tb[IFLA_TARGET_NETNSID]) { 3190 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3191 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3192 if (IS_ERR(tgt_net)) 3193 return PTR_ERR(tgt_net); 3194 } 3195 3196 err = -EINVAL; 3197 ifm = nlmsg_data(nlh); 3198 if (ifm->ifi_index 
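/* Editor's note on the selection order below: a positive ifindex wins,
 * then IFLA_IFNAME/IFLA_ALT_IFNAME, then IFLA_GROUP (bulk delete via
 * rtnl_group_dellink()); a request naming none of these fails -EINVAL.
 */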
> 0) 3199 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3200 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3201 dev = rtnl_dev_get(net, tb); 3202 else if (tb[IFLA_GROUP]) 3203 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP])); 3204 else 3205 goto out; 3206 3207 if (!dev) { 3208 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0) 3209 err = -ENODEV; 3210 3211 goto out; 3212 } 3213 3214 err = rtnl_delete_link(dev, portid, nlh); 3215 3216 out: 3217 if (netnsid >= 0) 3218 put_net(tgt_net); 3219 3220 return err; 3221 } 3222 3223 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 3224 u32 portid, const struct nlmsghdr *nlh) 3225 { 3226 unsigned int old_flags; 3227 int err; 3228 3229 old_flags = dev->flags; 3230 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { 3231 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), 3232 NULL); 3233 if (err < 0) 3234 return err; 3235 } 3236 3237 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 3238 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh); 3239 } else { 3240 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 3241 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh); 3242 } 3243 return 0; 3244 } 3245 EXPORT_SYMBOL(rtnl_configure_link); 3246 3247 struct net_device *rtnl_create_link(struct net *net, const char *ifname, 3248 unsigned char name_assign_type, 3249 const struct rtnl_link_ops *ops, 3250 struct nlattr *tb[], 3251 struct netlink_ext_ack *extack) 3252 { 3253 struct net_device *dev; 3254 unsigned int num_tx_queues = 1; 3255 unsigned int num_rx_queues = 1; 3256 3257 if (tb[IFLA_NUM_TX_QUEUES]) 3258 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); 3259 else if (ops->get_num_tx_queues) 3260 num_tx_queues = ops->get_num_tx_queues(); 3261 3262 if (tb[IFLA_NUM_RX_QUEUES]) 3263 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); 3264 else if (ops->get_num_rx_queues) 3265 num_rx_queues = ops->get_num_rx_queues(); 3266 3267 if (num_tx_queues < 1 || num_tx_queues > 4096) { 3268 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues"); 3269 return ERR_PTR(-EINVAL); 3270 } 3271 3272 if (num_rx_queues < 1 || num_rx_queues > 4096) { 3273 NL_SET_ERR_MSG(extack, "Invalid number of receive queues"); 3274 return ERR_PTR(-EINVAL); 3275 } 3276 3277 if (ops->alloc) { 3278 dev = ops->alloc(tb, ifname, name_assign_type, 3279 num_tx_queues, num_rx_queues); 3280 if (IS_ERR(dev)) 3281 return dev; 3282 } else { 3283 dev = alloc_netdev_mqs(ops->priv_size, ifname, 3284 name_assign_type, ops->setup, 3285 num_tx_queues, num_rx_queues); 3286 } 3287 3288 if (!dev) 3289 return ERR_PTR(-ENOMEM); 3290 3291 dev_net_set(dev, net); 3292 dev->rtnl_link_ops = ops; 3293 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 3294 3295 if (tb[IFLA_MTU]) { 3296 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3297 int err; 3298 3299 err = dev_validate_mtu(dev, mtu, extack); 3300 if (err) { 3301 free_netdev(dev); 3302 return ERR_PTR(err); 3303 } 3304 dev->mtu = mtu; 3305 } 3306 if (tb[IFLA_ADDRESS]) { 3307 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), 3308 nla_len(tb[IFLA_ADDRESS])); 3309 dev->addr_assign_type = NET_ADDR_SET; 3310 } 3311 if (tb[IFLA_BROADCAST]) 3312 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), 3313 nla_len(tb[IFLA_BROADCAST])); 3314 if (tb[IFLA_TXQLEN]) 3315 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 3316 if (tb[IFLA_OPERSTATE]) 3317 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 3318 if (tb[IFLA_LINKMODE]) 3319 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 3320 if 
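/* Editor's aside: a hedged sketch of the usual sequence around this
 * helper; rtnl_newlink_create() below follows it, and drivers such as
 * veth do much the same when creating a peer device (error handling
 * elided):
 *
 *   dev = rtnl_create_link(net, ifname, NET_NAME_USER, ops, tb, extack);
 *   err = register_netdevice(dev);                // or ops->newlink()
 *   err = rtnl_configure_link(dev, ifm, 0, NULL); // flags + first notify
 */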
(tb[IFLA_GROUP]) 3321 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); 3322 if (tb[IFLA_GSO_MAX_SIZE]) 3323 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); 3324 if (tb[IFLA_GSO_MAX_SEGS]) 3325 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); 3326 if (tb[IFLA_GRO_MAX_SIZE]) 3327 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE])); 3328 3329 return dev; 3330 } 3331 EXPORT_SYMBOL(rtnl_create_link); 3332 3333 static int rtnl_group_changelink(const struct sk_buff *skb, 3334 struct net *net, int group, 3335 struct ifinfomsg *ifm, 3336 struct netlink_ext_ack *extack, 3337 struct nlattr **tb) 3338 { 3339 struct net_device *dev, *aux; 3340 int err; 3341 3342 for_each_netdev_safe(net, dev, aux) { 3343 if (dev->group == group) { 3344 err = do_setlink(skb, dev, ifm, extack, tb, 0); 3345 if (err < 0) 3346 return err; 3347 } 3348 } 3349 3350 return 0; 3351 } 3352 3353 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, 3354 const struct rtnl_link_ops *ops, 3355 const struct nlmsghdr *nlh, 3356 struct nlattr **tb, struct nlattr **data, 3357 struct netlink_ext_ack *extack) 3358 { 3359 unsigned char name_assign_type = NET_NAME_USER; 3360 struct net *net = sock_net(skb->sk); 3361 u32 portid = NETLINK_CB(skb).portid; 3362 struct net *dest_net, *link_net; 3363 struct net_device *dev; 3364 char ifname[IFNAMSIZ]; 3365 int err; 3366 3367 if (!ops->alloc && !ops->setup) 3368 return -EOPNOTSUPP; 3369 3370 if (tb[IFLA_IFNAME]) { 3371 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); 3372 } else { 3373 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); 3374 name_assign_type = NET_NAME_ENUM; 3375 } 3376 3377 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN); 3378 if (IS_ERR(dest_net)) 3379 return PTR_ERR(dest_net); 3380 3381 if (tb[IFLA_LINK_NETNSID]) { 3382 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); 3383 3384 link_net = get_net_ns_by_id(dest_net, id); 3385 if (!link_net) { 3386 NL_SET_ERR_MSG(extack, "Unknown network namespace id"); 3387 err = -EINVAL; 3388 goto out; 3389 } 3390 err = -EPERM; 3391 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) 3392 goto out; 3393 } else { 3394 link_net = NULL; 3395 } 3396 3397 dev = rtnl_create_link(link_net ? : dest_net, ifname, 3398 name_assign_type, ops, tb, extack); 3399 if (IS_ERR(dev)) { 3400 err = PTR_ERR(dev); 3401 goto out; 3402 } 3403 3404 dev->ifindex = ifm->ifi_index; 3405 3406 if (ops->newlink) 3407 err = ops->newlink(link_net ? 
: net, dev, tb, data, extack); 3408 else 3409 err = register_netdevice(dev); 3410 if (err < 0) { 3411 free_netdev(dev); 3412 goto out; 3413 } 3414 3415 err = rtnl_configure_link(dev, ifm, portid, nlh); 3416 if (err < 0) 3417 goto out_unregister; 3418 if (link_net) { 3419 err = dev_change_net_namespace(dev, dest_net, ifname); 3420 if (err < 0) 3421 goto out_unregister; 3422 } 3423 if (tb[IFLA_MASTER]) { 3424 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); 3425 if (err) 3426 goto out_unregister; 3427 } 3428 out: 3429 if (link_net) 3430 put_net(link_net); 3431 put_net(dest_net); 3432 return err; 3433 out_unregister: 3434 if (ops->newlink) { 3435 LIST_HEAD(list_kill); 3436 3437 ops->dellink(dev, &list_kill); 3438 unregister_netdevice_many(&list_kill); 3439 } else { 3440 unregister_netdevice(dev); 3441 } 3442 goto out; 3443 } 3444 3445 struct rtnl_newlink_tbs { 3446 struct nlattr *tb[IFLA_MAX + 1]; 3447 struct nlattr *attr[RTNL_MAX_TYPE + 1]; 3448 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; 3449 }; 3450 3451 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3452 struct rtnl_newlink_tbs *tbs, 3453 struct netlink_ext_ack *extack) 3454 { 3455 struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; 3456 struct nlattr ** const tb = tbs->tb; 3457 const struct rtnl_link_ops *m_ops; 3458 struct net_device *master_dev; 3459 struct net *net = sock_net(skb->sk); 3460 const struct rtnl_link_ops *ops; 3461 struct nlattr **slave_data; 3462 char kind[MODULE_NAME_LEN]; 3463 struct net_device *dev; 3464 struct ifinfomsg *ifm; 3465 struct nlattr **data; 3466 bool link_specified; 3467 int err; 3468 3469 #ifdef CONFIG_MODULES 3470 replay: 3471 #endif 3472 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3473 ifla_policy, extack); 3474 if (err < 0) 3475 return err; 3476 3477 err = rtnl_ensure_unique_netns(tb, extack, false); 3478 if (err < 0) 3479 return err; 3480 3481 ifm = nlmsg_data(nlh); 3482 if (ifm->ifi_index > 0) { 3483 link_specified = true; 3484 dev = __dev_get_by_index(net, ifm->ifi_index); 3485 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3486 link_specified = true; 3487 dev = rtnl_dev_get(net, tb); 3488 } else { 3489 link_specified = false; 3490 dev = NULL; 3491 } 3492 3493 master_dev = NULL; 3494 m_ops = NULL; 3495 if (dev) { 3496 master_dev = netdev_master_upper_dev_get(dev); 3497 if (master_dev) 3498 m_ops = master_dev->rtnl_link_ops; 3499 } 3500 3501 err = validate_linkmsg(dev, tb, extack); 3502 if (err < 0) 3503 return err; 3504 3505 if (tb[IFLA_LINKINFO]) { 3506 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, 3507 tb[IFLA_LINKINFO], 3508 ifla_info_policy, NULL); 3509 if (err < 0) 3510 return err; 3511 } else 3512 memset(linkinfo, 0, sizeof(linkinfo)); 3513 3514 if (linkinfo[IFLA_INFO_KIND]) { 3515 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind)); 3516 ops = rtnl_link_ops_get(kind); 3517 } else { 3518 kind[0] = '\0'; 3519 ops = NULL; 3520 } 3521 3522 data = NULL; 3523 if (ops) { 3524 if (ops->maxtype > RTNL_MAX_TYPE) 3525 return -EINVAL; 3526 3527 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { 3528 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype, 3529 linkinfo[IFLA_INFO_DATA], 3530 ops->policy, extack); 3531 if (err < 0) 3532 return err; 3533 data = tbs->attr; 3534 } 3535 if (ops->validate) { 3536 err = ops->validate(tb, data, extack); 3537 if (err < 0) 3538 return err; 3539 } 3540 } 3541 3542 slave_data = NULL; 3543 if (m_ops) { 3544 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE) 3545 return -EINVAL; 3546 3547 if 
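/* Editor's aside: m_ops are the rtnl_link_ops of the master device found
 * above. A bonding-style driver would advertise slave handling roughly
 * like this (hypothetical foo_* names and enum, a hedged sketch rather
 * than any real driver):
 *
 *   static struct rtnl_link_ops foo_ops = {
 *           .kind             = "foo",
 *           .slave_maxtype    = IFLA_FOO_SLAVE_MAX,
 *           .slave_policy     = foo_slave_policy,
 *           .slave_changelink = foo_slave_changelink,
 *   };
 *
 * The parse below fills tbs->slave_attr against exactly that slave_policy.
 */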
(m_ops->slave_maxtype && 3548 linkinfo[IFLA_INFO_SLAVE_DATA]) { 3549 err = nla_parse_nested_deprecated(tbs->slave_attr, 3550 m_ops->slave_maxtype, 3551 linkinfo[IFLA_INFO_SLAVE_DATA], 3552 m_ops->slave_policy, 3553 extack); 3554 if (err < 0) 3555 return err; 3556 slave_data = tbs->slave_attr; 3557 } 3558 } 3559 3560 if (dev) { 3561 int status = 0; 3562 3563 if (nlh->nlmsg_flags & NLM_F_EXCL) 3564 return -EEXIST; 3565 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3566 return -EOPNOTSUPP; 3567 3568 if (linkinfo[IFLA_INFO_DATA]) { 3569 if (!ops || ops != dev->rtnl_link_ops || 3570 !ops->changelink) 3571 return -EOPNOTSUPP; 3572 3573 err = ops->changelink(dev, tb, data, extack); 3574 if (err < 0) 3575 return err; 3576 status |= DO_SETLINK_NOTIFY; 3577 } 3578 3579 if (linkinfo[IFLA_INFO_SLAVE_DATA]) { 3580 if (!m_ops || !m_ops->slave_changelink) 3581 return -EOPNOTSUPP; 3582 3583 err = m_ops->slave_changelink(master_dev, dev, tb, 3584 slave_data, extack); 3585 if (err < 0) 3586 return err; 3587 status |= DO_SETLINK_NOTIFY; 3588 } 3589 3590 return do_setlink(skb, dev, ifm, extack, tb, status); 3591 } 3592 3593 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 3594 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist, 3595 * or it's for a group 3596 */ 3597 if (link_specified) 3598 return -ENODEV; 3599 if (tb[IFLA_GROUP]) 3600 return rtnl_group_changelink(skb, net, 3601 nla_get_u32(tb[IFLA_GROUP]), 3602 ifm, extack, tb); 3603 return -ENODEV; 3604 } 3605 3606 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO]) 3607 return -EOPNOTSUPP; 3608 3609 if (!ops) { 3610 #ifdef CONFIG_MODULES 3611 if (kind[0]) { 3612 __rtnl_unlock(); 3613 request_module("rtnl-link-%s", kind); 3614 rtnl_lock(); 3615 ops = rtnl_link_ops_get(kind); 3616 if (ops) 3617 goto replay; 3618 } 3619 #endif 3620 NL_SET_ERR_MSG(extack, "Unknown device type"); 3621 return -EOPNOTSUPP; 3622 } 3623 3624 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack); 3625 } 3626 3627 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3628 struct netlink_ext_ack *extack) 3629 { 3630 struct rtnl_newlink_tbs *tbs; 3631 int ret; 3632 3633 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL); 3634 if (!tbs) 3635 return -ENOMEM; 3636 3637 ret = __rtnl_newlink(skb, nlh, tbs, extack); 3638 kfree(tbs); 3639 return ret; 3640 } 3641 3642 static int rtnl_valid_getlink_req(struct sk_buff *skb, 3643 const struct nlmsghdr *nlh, 3644 struct nlattr **tb, 3645 struct netlink_ext_ack *extack) 3646 { 3647 struct ifinfomsg *ifm; 3648 int i, err; 3649 3650 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 3651 NL_SET_ERR_MSG(extack, "Invalid header for get link"); 3652 return -EINVAL; 3653 } 3654 3655 if (!netlink_strict_get_check(skb)) 3656 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX, 3657 ifla_policy, extack); 3658 3659 ifm = nlmsg_data(nlh); 3660 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 3661 ifm->ifi_change) { 3662 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request"); 3663 return -EINVAL; 3664 } 3665 3666 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, 3667 ifla_policy, extack); 3668 if (err) 3669 return err; 3670 3671 for (i = 0; i <= IFLA_MAX; i++) { 3672 if (!tb[i]) 3673 continue; 3674 3675 switch (i) { 3676 case IFLA_IFNAME: 3677 case IFLA_ALT_IFNAME: 3678 case IFLA_EXT_MASK: 3679 case IFLA_TARGET_NETNSID: 3680 break; 3681 default: 3682 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request"); 3683 return -EINVAL; 3684 } 3685 } 3686 3687 return 0; 3688 } 3689 3690 static int 
rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, 3691 struct netlink_ext_ack *extack) 3692 { 3693 struct net *net = sock_net(skb->sk); 3694 struct net *tgt_net = net; 3695 struct ifinfomsg *ifm; 3696 struct nlattr *tb[IFLA_MAX+1]; 3697 struct net_device *dev = NULL; 3698 struct sk_buff *nskb; 3699 int netnsid = -1; 3700 int err; 3701 u32 ext_filter_mask = 0; 3702 3703 err = rtnl_valid_getlink_req(skb, nlh, tb, extack); 3704 if (err < 0) 3705 return err; 3706 3707 err = rtnl_ensure_unique_netns(tb, extack, true); 3708 if (err < 0) 3709 return err; 3710 3711 if (tb[IFLA_TARGET_NETNSID]) { 3712 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]); 3713 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid); 3714 if (IS_ERR(tgt_net)) 3715 return PTR_ERR(tgt_net); 3716 } 3717 3718 if (tb[IFLA_EXT_MASK]) 3719 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3720 3721 err = -EINVAL; 3722 ifm = nlmsg_data(nlh); 3723 if (ifm->ifi_index > 0) 3724 dev = __dev_get_by_index(tgt_net, ifm->ifi_index); 3725 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) 3726 dev = rtnl_dev_get(tgt_net, tb); 3727 else 3728 goto out; 3729 3730 err = -ENODEV; 3731 if (dev == NULL) 3732 goto out; 3733 3734 err = -ENOBUFS; 3735 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); 3736 if (nskb == NULL) 3737 goto out; 3738 3739 err = rtnl_fill_ifinfo(nskb, dev, net, 3740 RTM_NEWLINK, NETLINK_CB(skb).portid, 3741 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3742 0, NULL, 0, netnsid, GFP_KERNEL); 3743 if (err < 0) { 3744 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3745 WARN_ON(err == -EMSGSIZE); 3746 kfree_skb(nskb); 3747 } else 3748 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 3749 out: 3750 if (netnsid >= 0) 3751 put_net(tgt_net); 3752 3753 return err; 3754 } 3755 3756 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, 3757 bool *changed, struct netlink_ext_ack *extack) 3758 { 3759 char *alt_ifname; 3760 size_t size; 3761 int err; 3762 3763 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); 3764 if (err) 3765 return err; 3766 3767 if (cmd == RTM_NEWLINKPROP) { 3768 size = rtnl_prop_list_size(dev); 3769 size += nla_total_size(ALTIFNAMSIZ); 3770 if (size >= U16_MAX) { 3771 NL_SET_ERR_MSG(extack, 3772 "effective property list too long"); 3773 return -EINVAL; 3774 } 3775 } 3776 3777 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 3778 if (!alt_ifname) 3779 return -ENOMEM; 3780 3781 if (cmd == RTM_NEWLINKPROP) { 3782 err = netdev_name_node_alt_create(dev, alt_ifname); 3783 if (!err) 3784 alt_ifname = NULL; 3785 } else if (cmd == RTM_DELLINKPROP) { 3786 err = netdev_name_node_alt_destroy(dev, alt_ifname); 3787 } else { 3788 WARN_ON_ONCE(1); 3789 err = -EINVAL; 3790 } 3791 3792 kfree(alt_ifname); 3793 if (!err) 3794 *changed = true; 3795 return err; 3796 } 3797 3798 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh, 3799 struct netlink_ext_ack *extack) 3800 { 3801 struct net *net = sock_net(skb->sk); 3802 struct nlattr *tb[IFLA_MAX + 1]; 3803 struct net_device *dev; 3804 struct ifinfomsg *ifm; 3805 bool changed = false; 3806 struct nlattr *attr; 3807 int err, rem; 3808 3809 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); 3810 if (err) 3811 return err; 3812 3813 err = rtnl_ensure_unique_netns(tb, extack, true); 3814 if (err) 3815 return err; 3816 3817 ifm = nlmsg_data(nlh); 3818 if (ifm->ifi_index > 0) 3819 dev = __dev_get_by_index(net, ifm->ifi_index); 3820 else if (tb[IFLA_IFNAME] || 
tb[IFLA_ALT_IFNAME]) 3821 dev = rtnl_dev_get(net, tb); 3822 else 3823 return -EINVAL; 3824 3825 if (!dev) 3826 return -ENODEV; 3827 3828 if (!tb[IFLA_PROP_LIST]) 3829 return 0; 3830 3831 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) { 3832 switch (nla_type(attr)) { 3833 case IFLA_ALT_IFNAME: 3834 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack); 3835 if (err) 3836 return err; 3837 break; 3838 } 3839 } 3840 3841 if (changed) 3842 netdev_state_change(dev); 3843 return 0; 3844 } 3845 3846 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3847 struct netlink_ext_ack *extack) 3848 { 3849 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack); 3850 } 3851 3852 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, 3853 struct netlink_ext_ack *extack) 3854 { 3855 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); 3856 } 3857 3858 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) 3859 { 3860 struct net *net = sock_net(skb->sk); 3861 size_t min_ifinfo_dump_size = 0; 3862 struct nlattr *tb[IFLA_MAX+1]; 3863 u32 ext_filter_mask = 0; 3864 struct net_device *dev; 3865 int hdrlen; 3866 3867 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ 3868 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 3869 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); 3870 3871 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { 3872 if (tb[IFLA_EXT_MASK]) 3873 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); 3874 } 3875 3876 if (!ext_filter_mask) 3877 return NLMSG_GOODSIZE; 3878 /* 3879 * traverse the list of net devices and compute the minimum 3880 * buffer size based upon the filter mask. 3881 */ 3882 rcu_read_lock(); 3883 for_each_netdev_rcu(net, dev) { 3884 min_ifinfo_dump_size = max(min_ifinfo_dump_size, 3885 if_nlmsg_size(dev, ext_filter_mask)); 3886 } 3887 rcu_read_unlock(); 3888 3889 return nlmsg_total_size(min_ifinfo_dump_size); 3890 } 3891 3892 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 3893 { 3894 int idx; 3895 int s_idx = cb->family; 3896 int type = cb->nlh->nlmsg_type - RTM_BASE; 3897 int ret = 0; 3898 3899 if (s_idx == 0) 3900 s_idx = 1; 3901 3902 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { 3903 struct rtnl_link __rcu **tab; 3904 struct rtnl_link *link; 3905 rtnl_dumpit_func dumpit; 3906 3907 if (idx < s_idx || idx == PF_PACKET) 3908 continue; 3909 3910 if (type < 0 || type >= RTM_NR_MSGTYPES) 3911 continue; 3912 3913 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]); 3914 if (!tab) 3915 continue; 3916 3917 link = rcu_dereference_rtnl(tab[type]); 3918 if (!link) 3919 continue; 3920 3921 dumpit = link->dumpit; 3922 if (!dumpit) 3923 continue; 3924 3925 if (idx > s_idx) { 3926 memset(&cb->args[0], 0, sizeof(cb->args)); 3927 cb->prev_seq = 0; 3928 cb->seq = 0; 3929 } 3930 ret = dumpit(skb, cb); 3931 if (ret) 3932 break; 3933 } 3934 cb->family = idx; 3935 3936 return skb->len ? 
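/* Editor's note on this GNU "?:" expression: if the pass emitted any
 * bytes, skb->len is returned so netlink keeps the dump going; only a
 * completely empty pass propagates a dumpit error through ret.
 */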
: ret; 3937 } 3938 3939 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 3940 unsigned int change, 3941 u32 event, gfp_t flags, int *new_nsid, 3942 int new_ifindex, u32 portid, u32 seq) 3943 { 3944 struct net *net = dev_net(dev); 3945 struct sk_buff *skb; 3946 int err = -ENOBUFS; 3947 3948 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); 3949 if (skb == NULL) 3950 goto errout; 3951 3952 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3953 type, portid, seq, change, 0, 0, event, 3954 new_nsid, new_ifindex, -1, flags); 3955 if (err < 0) { 3956 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3957 WARN_ON(err == -EMSGSIZE); 3958 kfree_skb(skb); 3959 goto errout; 3960 } 3961 return skb; 3962 errout: 3963 if (err < 0) 3964 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 3965 return NULL; 3966 } 3967 3968 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags, 3969 u32 portid, const struct nlmsghdr *nlh) 3970 { 3971 struct net *net = dev_net(dev); 3972 3973 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags); 3974 } 3975 3976 static void rtmsg_ifinfo_event(int type, struct net_device *dev, 3977 unsigned int change, u32 event, 3978 gfp_t flags, int *new_nsid, int new_ifindex, 3979 u32 portid, const struct nlmsghdr *nlh) 3980 { 3981 struct sk_buff *skb; 3982 3983 if (dev->reg_state != NETREG_REGISTERED) 3984 return; 3985 3986 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid, 3987 new_ifindex, portid, nlmsg_seq(nlh)); 3988 if (skb) 3989 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh); 3990 } 3991 3992 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, 3993 gfp_t flags, u32 portid, const struct nlmsghdr *nlh) 3994 { 3995 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 3996 NULL, 0, portid, nlh); 3997 } 3998 3999 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 4000 gfp_t flags, int *new_nsid, int new_ifindex) 4001 { 4002 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, 4003 new_nsid, new_ifindex, 0, NULL); 4004 } 4005 4006 static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 4007 struct net_device *dev, 4008 u8 *addr, u16 vid, u32 pid, u32 seq, 4009 int type, unsigned int flags, 4010 int nlflags, u16 ndm_state) 4011 { 4012 struct nlmsghdr *nlh; 4013 struct ndmsg *ndm; 4014 4015 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); 4016 if (!nlh) 4017 return -EMSGSIZE; 4018 4019 ndm = nlmsg_data(nlh); 4020 ndm->ndm_family = AF_BRIDGE; 4021 ndm->ndm_pad1 = 0; 4022 ndm->ndm_pad2 = 0; 4023 ndm->ndm_flags = flags; 4024 ndm->ndm_type = 0; 4025 ndm->ndm_ifindex = dev->ifindex; 4026 ndm->ndm_state = ndm_state; 4027 4028 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) 4029 goto nla_put_failure; 4030 if (vid) 4031 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) 4032 goto nla_put_failure; 4033 4034 nlmsg_end(skb, nlh); 4035 return 0; 4036 4037 nla_put_failure: 4038 nlmsg_cancel(skb, nlh); 4039 return -EMSGSIZE; 4040 } 4041 4042 static inline size_t rtnl_fdb_nlmsg_size(void) 4043 { 4044 return NLMSG_ALIGN(sizeof(struct ndmsg)) + 4045 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ 4046 nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 4047 0; 4048 } 4049 4050 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 4051 u16 ndm_state) 4052 { 4053 struct net *net = dev_net(dev); 4054 struct sk_buff *skb; 4055 int err = -ENOBUFS; 4056 4057 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); 4058 if (!skb) 4059 goto errout; 4060 4061 err = 
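/* Editor's note, sizing arithmetic behind the allocation above:
 * rtnl_fdb_nlmsg_size() is NLMSG_ALIGN(sizeof(struct ndmsg)) +
 * nla_total_size(ETH_ALEN) + nla_total_size(sizeof(u16)), i.e.
 * 12 + 12 + 8 = 32 bytes of payload: exactly one NDA_LLADDR plus an
 * optional NDA_VLAN, so the fill below cannot overrun the skb.
 */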
nlmsg_populate_fdb_fill(skb, dev, addr, vid, 4062 0, 0, type, NTF_SELF, 0, ndm_state); 4063 if (err < 0) { 4064 kfree_skb(skb); 4065 goto errout; 4066 } 4067 4068 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 4069 return; 4070 errout: 4071 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 4072 } 4073 4074 /* 4075 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry 4076 */ 4077 int ndo_dflt_fdb_add(struct ndmsg *ndm, 4078 struct nlattr *tb[], 4079 struct net_device *dev, 4080 const unsigned char *addr, u16 vid, 4081 u16 flags) 4082 { 4083 int err = -EINVAL; 4084 4085 /* If aging addresses are supported device will need to 4086 * implement its own handler for this. 4087 */ 4088 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 4089 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4090 return err; 4091 } 4092 4093 if (tb[NDA_FLAGS_EXT]) { 4094 netdev_info(dev, "invalid flags given to default FDB implementation\n"); 4095 return err; 4096 } 4097 4098 if (vid) { 4099 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n"); 4100 return err; 4101 } 4102 4103 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4104 err = dev_uc_add_excl(dev, addr); 4105 else if (is_multicast_ether_addr(addr)) 4106 err = dev_mc_add_excl(dev, addr); 4107 4108 /* Only return duplicate errors if NLM_F_EXCL is set */ 4109 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 4110 err = 0; 4111 4112 return err; 4113 } 4114 EXPORT_SYMBOL(ndo_dflt_fdb_add); 4115 4116 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid, 4117 struct netlink_ext_ack *extack) 4118 { 4119 u16 vid = 0; 4120 4121 if (vlan_attr) { 4122 if (nla_len(vlan_attr) != sizeof(u16)) { 4123 NL_SET_ERR_MSG(extack, "invalid vlan attribute size"); 4124 return -EINVAL; 4125 } 4126 4127 vid = nla_get_u16(vlan_attr); 4128 4129 if (!vid || vid >= VLAN_VID_MASK) { 4130 NL_SET_ERR_MSG(extack, "invalid vlan id"); 4131 return -EINVAL; 4132 } 4133 } 4134 *p_vid = vid; 4135 return 0; 4136 } 4137 4138 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, 4139 struct netlink_ext_ack *extack) 4140 { 4141 struct net *net = sock_net(skb->sk); 4142 struct ndmsg *ndm; 4143 struct nlattr *tb[NDA_MAX+1]; 4144 struct net_device *dev; 4145 u8 *addr; 4146 u16 vid; 4147 int err; 4148 4149 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, 4150 extack); 4151 if (err < 0) 4152 return err; 4153 4154 ndm = nlmsg_data(nlh); 4155 if (ndm->ndm_ifindex == 0) { 4156 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4157 return -EINVAL; 4158 } 4159 4160 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4161 if (dev == NULL) { 4162 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4163 return -ENODEV; 4164 } 4165 4166 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4167 NL_SET_ERR_MSG(extack, "invalid address"); 4168 return -EINVAL; 4169 } 4170 4171 if (dev->type != ARPHRD_ETHER) { 4172 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); 4173 return -EINVAL; 4174 } 4175 4176 addr = nla_data(tb[NDA_LLADDR]); 4177 4178 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4179 if (err) 4180 return err; 4181 4182 err = -EOPNOTSUPP; 4183 4184 /* Support fdb on master device the net/bridge default case */ 4185 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4186 netif_is_bridge_port(dev)) { 4187 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4188 const struct net_device_ops *ops = br_dev->netdev_ops; 4189 4190 err = ops->ndo_fdb_add(ndm, tb, 
dev, addr, vid, 4191 nlh->nlmsg_flags, extack); 4192 if (err) 4193 goto out; 4194 else 4195 ndm->ndm_flags &= ~NTF_MASTER; 4196 } 4197 4198 /* Embedded bridge, macvlan, and any other device support */ 4199 if ((ndm->ndm_flags & NTF_SELF)) { 4200 if (dev->netdev_ops->ndo_fdb_add) 4201 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, 4202 vid, 4203 nlh->nlmsg_flags, 4204 extack); 4205 else 4206 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, 4207 nlh->nlmsg_flags); 4208 4209 if (!err) { 4210 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH, 4211 ndm->ndm_state); 4212 ndm->ndm_flags &= ~NTF_SELF; 4213 } 4214 } 4215 out: 4216 return err; 4217 } 4218 4219 /* 4220 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry 4221 */ 4222 int ndo_dflt_fdb_del(struct ndmsg *ndm, 4223 struct nlattr *tb[], 4224 struct net_device *dev, 4225 const unsigned char *addr, u16 vid) 4226 { 4227 int err = -EINVAL; 4228 4229 /* If the device supports aging addresses, it needs to 4230 * implement its own handler for this. 4231 */ 4232 if (!(ndm->ndm_state & NUD_PERMANENT)) { 4233 netdev_info(dev, "default FDB implementation only supports local addresses\n"); 4234 return err; 4235 } 4236 4237 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 4238 err = dev_uc_del(dev, addr); 4239 else if (is_multicast_ether_addr(addr)) 4240 err = dev_mc_del(dev, addr); 4241 4242 return err; 4243 } 4244 EXPORT_SYMBOL(ndo_dflt_fdb_del); 4245 4246 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = { 4247 [NDA_VLAN] = { .type = NLA_U16 }, 4248 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), 4249 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 4250 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 4251 }; 4252 4253 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, 4254 struct netlink_ext_ack *extack) 4255 { 4256 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK); 4257 struct net *net = sock_net(skb->sk); 4258 const struct net_device_ops *ops; 4259 struct ndmsg *ndm; 4260 struct nlattr *tb[NDA_MAX+1]; 4261 struct net_device *dev; 4262 __u8 *addr = NULL; 4263 int err; 4264 u16 vid; 4265 4266 if (!netlink_capable(skb, CAP_NET_ADMIN)) 4267 return -EPERM; 4268 4269 if (!del_bulk) { 4270 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4271 NULL, extack); 4272 } else { 4273 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, 4274 fdb_del_bulk_policy, extack); 4275 } 4276 if (err < 0) 4277 return err; 4278 4279 ndm = nlmsg_data(nlh); 4280 if (ndm->ndm_ifindex == 0) { 4281 NL_SET_ERR_MSG(extack, "invalid ifindex"); 4282 return -EINVAL; 4283 } 4284 4285 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 4286 if (dev == NULL) { 4287 NL_SET_ERR_MSG(extack, "unknown ifindex"); 4288 return -ENODEV; 4289 } 4290 4291 if (!del_bulk) { 4292 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { 4293 NL_SET_ERR_MSG(extack, "invalid address"); 4294 return -EINVAL; 4295 } 4296 addr = nla_data(tb[NDA_LLADDR]); 4297 } 4298 4299 if (dev->type != ARPHRD_ETHER) { 4300 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); 4301 return -EINVAL; 4302 } 4303 4304 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); 4305 if (err) 4306 return err; 4307 4308 err = -EOPNOTSUPP; 4309 4310 /* Support FDB on the master device; this is the net/bridge default case */ 4311 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 4312 netif_is_bridge_port(dev)) { 4313 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4314 4315 ops = br_dev->netdev_ops; 4316 if (!del_bulk) { 4317 if
(ops->ndo_fdb_del) 4318 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4319 } else { 4320 if (ops->ndo_fdb_del_bulk) 4321 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4322 extack); 4323 } 4324 4325 if (err) 4326 goto out; 4327 else 4328 ndm->ndm_flags &= ~NTF_MASTER; 4329 } 4330 4331 /* Embedded bridge, macvlan, and any other device support */ 4332 if (ndm->ndm_flags & NTF_SELF) { 4333 ops = dev->netdev_ops; 4334 if (!del_bulk) { 4335 if (ops->ndo_fdb_del) 4336 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); 4337 else 4338 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); 4339 } else { 4340 /* in case err was cleared by NTF_MASTER call */ 4341 err = -EOPNOTSUPP; 4342 if (ops->ndo_fdb_del_bulk) 4343 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, 4344 extack); 4345 } 4346 4347 if (!err) { 4348 if (!del_bulk) 4349 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, 4350 ndm->ndm_state); 4351 ndm->ndm_flags &= ~NTF_SELF; 4352 } 4353 } 4354 out: 4355 return err; 4356 } 4357 4358 static int nlmsg_populate_fdb(struct sk_buff *skb, 4359 struct netlink_callback *cb, 4360 struct net_device *dev, 4361 int *idx, 4362 struct netdev_hw_addr_list *list) 4363 { 4364 struct netdev_hw_addr *ha; 4365 int err; 4366 u32 portid, seq; 4367 4368 portid = NETLINK_CB(cb->skb).portid; 4369 seq = cb->nlh->nlmsg_seq; 4370 4371 list_for_each_entry(ha, &list->list, list) { 4372 if (*idx < cb->args[2]) 4373 goto skip; 4374 4375 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0, 4376 portid, seq, 4377 RTM_NEWNEIGH, NTF_SELF, 4378 NLM_F_MULTI, NUD_PERMANENT); 4379 if (err < 0) 4380 return err; 4381 skip: 4382 *idx += 1; 4383 } 4384 return 0; 4385 } 4386 4387 /** 4388 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table. 4389 * @skb: socket buffer to store message in 4390 * @cb: netlink callback 4391 * @dev: netdevice 4392 * @filter_dev: ignored 4393 * @idx: the number of FDB table entries dumped is added to *@idx 4394 * 4395 * Default netdevice operation to dump the existing unicast address list. 4396 * Returns number of addresses from list put in skb. 
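 * Note that the return value itself is 0 or a negative errno; the count
 * of addresses put in skb is accumulated in *@idx, which also advances
 * past entries skipped via cb->args[2].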
4397 */ 4398 int ndo_dflt_fdb_dump(struct sk_buff *skb, 4399 struct netlink_callback *cb, 4400 struct net_device *dev, 4401 struct net_device *filter_dev, 4402 int *idx) 4403 { 4404 int err; 4405 4406 if (dev->type != ARPHRD_ETHER) 4407 return -EINVAL; 4408 4409 netif_addr_lock_bh(dev); 4410 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); 4411 if (err) 4412 goto out; 4413 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc); 4414 out: 4415 netif_addr_unlock_bh(dev); 4416 return err; 4417 } 4418 EXPORT_SYMBOL(ndo_dflt_fdb_dump); 4419 4420 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh, 4421 int *br_idx, int *brport_idx, 4422 struct netlink_ext_ack *extack) 4423 { 4424 struct nlattr *tb[NDA_MAX + 1]; 4425 struct ndmsg *ndm; 4426 int err, i; 4427 4428 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4429 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request"); 4430 return -EINVAL; 4431 } 4432 4433 ndm = nlmsg_data(nlh); 4434 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4435 ndm->ndm_flags || ndm->ndm_type) { 4436 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request"); 4437 return -EINVAL; 4438 } 4439 4440 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4441 NDA_MAX, NULL, extack); 4442 if (err < 0) 4443 return err; 4444 4445 *brport_idx = ndm->ndm_ifindex; 4446 for (i = 0; i <= NDA_MAX; ++i) { 4447 if (!tb[i]) 4448 continue; 4449 4450 switch (i) { 4451 case NDA_IFINDEX: 4452 if (nla_len(tb[i]) != sizeof(u32)) { 4453 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request"); 4454 return -EINVAL; 4455 } 4456 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]); 4457 break; 4458 case NDA_MASTER: 4459 if (nla_len(tb[i]) != sizeof(u32)) { 4460 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request"); 4461 return -EINVAL; 4462 } 4463 *br_idx = nla_get_u32(tb[NDA_MASTER]); 4464 break; 4465 default: 4466 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request"); 4467 return -EINVAL; 4468 } 4469 } 4470 4471 return 0; 4472 } 4473 4474 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh, 4475 int *br_idx, int *brport_idx, 4476 struct netlink_ext_ack *extack) 4477 { 4478 struct nlattr *tb[IFLA_MAX+1]; 4479 int err; 4480 4481 /* A hack to preserve kernel<->userspace interface. 4482 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0. 4483 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails. 4484 * So, check for ndmsg with an optional u32 attribute (not used here). 4485 * Fortunately these sizes don't conflict with the size of ifinfomsg 4486 * with an optional attribute. 
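 * (Concretely, sizeof(struct ndmsg) is 12 and sizeof(struct ifinfomsg)
 * is 16 bytes; with one aligned u32 attribute the payloads come to 20
 * and 24 bytes respectively, so length alone identifies the layout.)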
4487 */ 4488 if (nlmsg_len(nlh) != sizeof(struct ndmsg) && 4489 (nlmsg_len(nlh) != sizeof(struct ndmsg) + 4490 nla_attr_size(sizeof(u32)))) { 4491 struct ifinfomsg *ifm; 4492 4493 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4494 tb, IFLA_MAX, ifla_policy, 4495 extack); 4496 if (err < 0) { 4497 return -EINVAL; 4498 } else if (err == 0) { 4499 if (tb[IFLA_MASTER]) 4500 *br_idx = nla_get_u32(tb[IFLA_MASTER]); 4501 } 4502 4503 ifm = nlmsg_data(nlh); 4504 *brport_idx = ifm->ifi_index; 4505 } 4506 return 0; 4507 } 4508 4509 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 4510 { 4511 struct net_device *dev; 4512 struct net_device *br_dev = NULL; 4513 const struct net_device_ops *ops = NULL; 4514 const struct net_device_ops *cops = NULL; 4515 struct net *net = sock_net(skb->sk); 4516 struct hlist_head *head; 4517 int brport_idx = 0; 4518 int br_idx = 0; 4519 int h, s_h; 4520 int idx = 0, s_idx; 4521 int err = 0; 4522 int fidx = 0; 4523 4524 if (cb->strict_check) 4525 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx, 4526 cb->extack); 4527 else 4528 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx, 4529 cb->extack); 4530 if (err < 0) 4531 return err; 4532 4533 if (br_idx) { 4534 br_dev = __dev_get_by_index(net, br_idx); 4535 if (!br_dev) 4536 return -ENODEV; 4537 4538 ops = br_dev->netdev_ops; 4539 } 4540 4541 s_h = cb->args[0]; 4542 s_idx = cb->args[1]; 4543 4544 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 4545 idx = 0; 4546 head = &net->dev_index_head[h]; 4547 hlist_for_each_entry(dev, head, index_hlist) { 4548 4549 if (brport_idx && (dev->ifindex != brport_idx)) 4550 continue; 4551 4552 if (!br_idx) { /* user did not specify a specific bridge */ 4553 if (netif_is_bridge_port(dev)) { 4554 br_dev = netdev_master_upper_dev_get(dev); 4555 cops = br_dev->netdev_ops; 4556 } 4557 } else { 4558 if (dev != br_dev && 4559 !netif_is_bridge_port(dev)) 4560 continue; 4561 4562 if (br_dev != netdev_master_upper_dev_get(dev) && 4563 !netif_is_bridge_master(dev)) 4564 continue; 4565 cops = ops; 4566 } 4567 4568 if (idx < s_idx) 4569 goto cont; 4570 4571 if (netif_is_bridge_port(dev)) { 4572 if (cops && cops->ndo_fdb_dump) { 4573 err = cops->ndo_fdb_dump(skb, cb, 4574 br_dev, dev, 4575 &fidx); 4576 if (err == -EMSGSIZE) 4577 goto out; 4578 } 4579 } 4580 4581 if (dev->netdev_ops->ndo_fdb_dump) 4582 err = dev->netdev_ops->ndo_fdb_dump(skb, cb, 4583 dev, NULL, 4584 &fidx); 4585 else 4586 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, 4587 &fidx); 4588 if (err == -EMSGSIZE) 4589 goto out; 4590 4591 cops = NULL; 4592 4593 /* reset fdb offset to 0 for rest of the interfaces */ 4594 cb->args[2] = 0; 4595 fidx = 0; 4596 cont: 4597 idx++; 4598 } 4599 } 4600 4601 out: 4602 cb->args[0] = h; 4603 cb->args[1] = idx; 4604 cb->args[2] = fidx; 4605 4606 return skb->len; 4607 } 4608 4609 static int valid_fdb_get_strict(const struct nlmsghdr *nlh, 4610 struct nlattr **tb, u8 *ndm_flags, 4611 int *br_idx, int *brport_idx, u8 **addr, 4612 u16 *vid, struct netlink_ext_ack *extack) 4613 { 4614 struct ndmsg *ndm; 4615 int err, i; 4616 4617 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 4618 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request"); 4619 return -EINVAL; 4620 } 4621 4622 ndm = nlmsg_data(nlh); 4623 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 4624 ndm->ndm_type) { 4625 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request"); 4626 return -EINVAL; 4627 } 4628 4629 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) { 4630 
NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request"); 4631 return -EINVAL; 4632 } 4633 4634 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 4635 NDA_MAX, nda_policy, extack); 4636 if (err < 0) 4637 return err; 4638 4639 *ndm_flags = ndm->ndm_flags; 4640 *brport_idx = ndm->ndm_ifindex; 4641 for (i = 0; i <= NDA_MAX; ++i) { 4642 if (!tb[i]) 4643 continue; 4644 4645 switch (i) { 4646 case NDA_MASTER: 4647 *br_idx = nla_get_u32(tb[i]); 4648 break; 4649 case NDA_LLADDR: 4650 if (nla_len(tb[i]) != ETH_ALEN) { 4651 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request"); 4652 return -EINVAL; 4653 } 4654 *addr = nla_data(tb[i]); 4655 break; 4656 case NDA_VLAN: 4657 err = fdb_vid_parse(tb[i], vid, extack); 4658 if (err) 4659 return err; 4660 break; 4661 case NDA_VNI: 4662 break; 4663 default: 4664 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request"); 4665 return -EINVAL; 4666 } 4667 } 4668 4669 return 0; 4670 } 4671 4672 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4673 struct netlink_ext_ack *extack) 4674 { 4675 struct net_device *dev = NULL, *br_dev = NULL; 4676 const struct net_device_ops *ops = NULL; 4677 struct net *net = sock_net(in_skb->sk); 4678 struct nlattr *tb[NDA_MAX + 1]; 4679 struct sk_buff *skb; 4680 int brport_idx = 0; 4681 u8 ndm_flags = 0; 4682 int br_idx = 0; 4683 u8 *addr = NULL; 4684 u16 vid = 0; 4685 int err; 4686 4687 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx, 4688 &brport_idx, &addr, &vid, extack); 4689 if (err < 0) 4690 return err; 4691 4692 if (!addr) { 4693 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request"); 4694 return -EINVAL; 4695 } 4696 4697 if (brport_idx) { 4698 dev = __dev_get_by_index(net, brport_idx); 4699 if (!dev) { 4700 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 4701 return -ENODEV; 4702 } 4703 } 4704 4705 if (br_idx) { 4706 if (dev) { 4707 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive"); 4708 return -EINVAL; 4709 } 4710 4711 br_dev = __dev_get_by_index(net, br_idx); 4712 if (!br_dev) { 4713 NL_SET_ERR_MSG(extack, "Invalid master ifindex"); 4714 return -EINVAL; 4715 } 4716 ops = br_dev->netdev_ops; 4717 } 4718 4719 if (dev) { 4720 if (!ndm_flags || (ndm_flags & NTF_MASTER)) { 4721 if (!netif_is_bridge_port(dev)) { 4722 NL_SET_ERR_MSG(extack, "Device is not a bridge port"); 4723 return -EINVAL; 4724 } 4725 br_dev = netdev_master_upper_dev_get(dev); 4726 if (!br_dev) { 4727 NL_SET_ERR_MSG(extack, "Master of device not found"); 4728 return -EINVAL; 4729 } 4730 ops = br_dev->netdev_ops; 4731 } else { 4732 if (!(ndm_flags & NTF_SELF)) { 4733 NL_SET_ERR_MSG(extack, "Missing NTF_SELF"); 4734 return -EINVAL; 4735 } 4736 ops = dev->netdev_ops; 4737 } 4738 } 4739 4740 if (!br_dev && !dev) { 4741 NL_SET_ERR_MSG(extack, "No device specified"); 4742 return -ENODEV; 4743 } 4744 4745 if (!ops || !ops->ndo_fdb_get) { 4746 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device"); 4747 return -EOPNOTSUPP; 4748 } 4749 4750 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 4751 if (!skb) 4752 return -ENOBUFS; 4753 4754 if (br_dev) 4755 dev = br_dev; 4756 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid, 4757 NETLINK_CB(in_skb).portid, 4758 nlh->nlmsg_seq, extack); 4759 if (err) 4760 goto out; 4761 4762 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4763 out: 4764 kfree_skb(skb); 4765 return err; 4766 } 4767 4768 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, 4769 unsigned int attrnum, unsigned int flag) 
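/* Emit @attrnum as a u8 boolean derived from @flags, but only when @mask
 * selects @flag; otherwise put nothing and report success.
 */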
4770 { 4771 if (mask & flag) 4772 return nla_put_u8(skb, attrnum, !!(flags & flag)); 4773 return 0; 4774 } 4775 4776 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4777 struct net_device *dev, u16 mode, 4778 u32 flags, u32 mask, int nlflags, 4779 u32 filter_mask, 4780 int (*vlan_fill)(struct sk_buff *skb, 4781 struct net_device *dev, 4782 u32 filter_mask)) 4783 { 4784 struct nlmsghdr *nlh; 4785 struct ifinfomsg *ifm; 4786 struct nlattr *br_afspec; 4787 struct nlattr *protinfo; 4788 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 4789 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4790 int err = 0; 4791 4792 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); 4793 if (nlh == NULL) 4794 return -EMSGSIZE; 4795 4796 ifm = nlmsg_data(nlh); 4797 ifm->ifi_family = AF_BRIDGE; 4798 ifm->__ifi_pad = 0; 4799 ifm->ifi_type = dev->type; 4800 ifm->ifi_index = dev->ifindex; 4801 ifm->ifi_flags = dev_get_flags(dev); 4802 ifm->ifi_change = 0; 4803 4804 4805 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 4806 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4807 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 4808 (br_dev && 4809 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 4810 (dev->addr_len && 4811 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4812 (dev->ifindex != dev_get_iflink(dev) && 4813 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) 4814 goto nla_put_failure; 4815 4816 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 4817 if (!br_afspec) 4818 goto nla_put_failure; 4819 4820 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) { 4821 nla_nest_cancel(skb, br_afspec); 4822 goto nla_put_failure; 4823 } 4824 4825 if (mode != BRIDGE_MODE_UNDEF) { 4826 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { 4827 nla_nest_cancel(skb, br_afspec); 4828 goto nla_put_failure; 4829 } 4830 } 4831 if (vlan_fill) { 4832 err = vlan_fill(skb, dev, filter_mask); 4833 if (err) { 4834 nla_nest_cancel(skb, br_afspec); 4835 goto nla_put_failure; 4836 } 4837 } 4838 nla_nest_end(skb, br_afspec); 4839 4840 protinfo = nla_nest_start(skb, IFLA_PROTINFO); 4841 if (!protinfo) 4842 goto nla_put_failure; 4843 4844 if (brport_nla_put_flag(skb, flags, mask, 4845 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) || 4846 brport_nla_put_flag(skb, flags, mask, 4847 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) || 4848 brport_nla_put_flag(skb, flags, mask, 4849 IFLA_BRPORT_FAST_LEAVE, 4850 BR_MULTICAST_FAST_LEAVE) || 4851 brport_nla_put_flag(skb, flags, mask, 4852 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) || 4853 brport_nla_put_flag(skb, flags, mask, 4854 IFLA_BRPORT_LEARNING, BR_LEARNING) || 4855 brport_nla_put_flag(skb, flags, mask, 4856 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) || 4857 brport_nla_put_flag(skb, flags, mask, 4858 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) || 4859 brport_nla_put_flag(skb, flags, mask, 4860 IFLA_BRPORT_PROXYARP, BR_PROXYARP) || 4861 brport_nla_put_flag(skb, flags, mask, 4862 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) || 4863 brport_nla_put_flag(skb, flags, mask, 4864 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) { 4865 nla_nest_cancel(skb, protinfo); 4866 goto nla_put_failure; 4867 } 4868 4869 nla_nest_end(skb, protinfo); 4870 4871 nlmsg_end(skb, nlh); 4872 return 0; 4873 nla_put_failure: 4874 nlmsg_cancel(skb, nlh); 4875 return err ? 
err : -EMSGSIZE; 4876 } 4877 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink); 4878 4879 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh, 4880 bool strict_check, u32 *filter_mask, 4881 struct netlink_ext_ack *extack) 4882 { 4883 struct nlattr *tb[IFLA_MAX+1]; 4884 int err, i; 4885 4886 if (strict_check) { 4887 struct ifinfomsg *ifm; 4888 4889 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 4890 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump"); 4891 return -EINVAL; 4892 } 4893 4894 ifm = nlmsg_data(nlh); 4895 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 4896 ifm->ifi_change || ifm->ifi_index) { 4897 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request"); 4898 return -EINVAL; 4899 } 4900 4901 err = nlmsg_parse_deprecated_strict(nlh, 4902 sizeof(struct ifinfomsg), 4903 tb, IFLA_MAX, ifla_policy, 4904 extack); 4905 } else { 4906 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), 4907 tb, IFLA_MAX, ifla_policy, 4908 extack); 4909 } 4910 if (err < 0) 4911 return err; 4912 4913 /* new attributes should only be added with strict checking */ 4914 for (i = 0; i <= IFLA_MAX; ++i) { 4915 if (!tb[i]) 4916 continue; 4917 4918 switch (i) { 4919 case IFLA_EXT_MASK: 4920 *filter_mask = nla_get_u32(tb[i]); 4921 break; 4922 default: 4923 if (strict_check) { 4924 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request"); 4925 return -EINVAL; 4926 } 4927 } 4928 } 4929 4930 return 0; 4931 } 4932 4933 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) 4934 { 4935 const struct nlmsghdr *nlh = cb->nlh; 4936 struct net *net = sock_net(skb->sk); 4937 struct net_device *dev; 4938 int idx = 0; 4939 u32 portid = NETLINK_CB(cb->skb).portid; 4940 u32 seq = nlh->nlmsg_seq; 4941 u32 filter_mask = 0; 4942 int err; 4943 4944 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask, 4945 cb->extack); 4946 if (err < 0 && cb->strict_check) 4947 return err; 4948 4949 rcu_read_lock(); 4950 for_each_netdev_rcu(net, dev) { 4951 const struct net_device_ops *ops = dev->netdev_ops; 4952 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 4953 4954 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 4955 if (idx >= cb->args[0]) { 4956 err = br_dev->netdev_ops->ndo_bridge_getlink( 4957 skb, portid, seq, dev, 4958 filter_mask, NLM_F_MULTI); 4959 if (err < 0 && err != -EOPNOTSUPP) { 4960 if (likely(skb->len)) 4961 break; 4962 4963 goto out_err; 4964 } 4965 } 4966 idx++; 4967 } 4968 4969 if (ops->ndo_bridge_getlink) { 4970 if (idx >= cb->args[0]) { 4971 err = ops->ndo_bridge_getlink(skb, portid, 4972 seq, dev, 4973 filter_mask, 4974 NLM_F_MULTI); 4975 if (err < 0 && err != -EOPNOTSUPP) { 4976 if (likely(skb->len)) 4977 break; 4978 4979 goto out_err; 4980 } 4981 } 4982 idx++; 4983 } 4984 } 4985 err = skb->len; 4986 out_err: 4987 rcu_read_unlock(); 4988 cb->args[0] = idx; 4989 4990 return err; 4991 } 4992 4993 static inline size_t bridge_nlmsg_size(void) 4994 { 4995 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 4996 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 4997 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 4998 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ 4999 + nla_total_size(sizeof(u32)) /* IFLA_MTU */ 5000 + nla_total_size(sizeof(u32)) /* IFLA_LINK */ 5001 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ 5002 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ 5003 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ 5004 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ 5005 + 
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ 5006 } 5007 5008 static int rtnl_bridge_notify(struct net_device *dev) 5009 { 5010 struct net *net = dev_net(dev); 5011 struct sk_buff *skb; 5012 int err = -EOPNOTSUPP; 5013 5014 if (!dev->netdev_ops->ndo_bridge_getlink) 5015 return 0; 5016 5017 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); 5018 if (!skb) { 5019 err = -ENOMEM; 5020 goto errout; 5021 } 5022 5023 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); 5024 if (err < 0) 5025 goto errout; 5026 5027 /* Notification info is only filled for bridge ports, not the bridge 5028 * device itself. Therefore, a zero notification length is valid and 5029 * should not result in an error. 5030 */ 5031 if (!skb->len) 5032 goto errout; 5033 5034 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 5035 return 0; 5036 errout: 5037 WARN_ON(err == -EMSGSIZE); 5038 kfree_skb(skb); 5039 if (err) 5040 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 5041 return err; 5042 } 5043 5044 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 5045 struct netlink_ext_ack *extack) 5046 { 5047 struct net *net = sock_net(skb->sk); 5048 struct ifinfomsg *ifm; 5049 struct net_device *dev; 5050 struct nlattr *br_spec, *attr = NULL; 5051 int rem, err = -EOPNOTSUPP; 5052 u16 flags = 0; 5053 bool have_flags = false; 5054 5055 if (nlmsg_len(nlh) < sizeof(*ifm)) 5056 return -EINVAL; 5057 5058 ifm = nlmsg_data(nlh); 5059 if (ifm->ifi_family != AF_BRIDGE) 5060 return -EPFNOSUPPORT; 5061 5062 dev = __dev_get_by_index(net, ifm->ifi_index); 5063 if (!dev) { 5064 NL_SET_ERR_MSG(extack, "unknown ifindex"); 5065 return -ENODEV; 5066 } 5067 5068 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5069 if (br_spec) { 5070 nla_for_each_nested(attr, br_spec, rem) { 5071 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5072 if (nla_len(attr) < sizeof(flags)) 5073 return -EINVAL; 5074 5075 have_flags = true; 5076 flags = nla_get_u16(attr); 5077 break; 5078 } 5079 } 5080 } 5081 5082 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5083 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5084 5085 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) { 5086 err = -EOPNOTSUPP; 5087 goto out; 5088 } 5089 5090 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags, 5091 extack); 5092 if (err) 5093 goto out; 5094 5095 flags &= ~BRIDGE_FLAGS_MASTER; 5096 } 5097 5098 if ((flags & BRIDGE_FLAGS_SELF)) { 5099 if (!dev->netdev_ops->ndo_bridge_setlink) 5100 err = -EOPNOTSUPP; 5101 else 5102 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh, 5103 flags, 5104 extack); 5105 if (!err) { 5106 flags &= ~BRIDGE_FLAGS_SELF; 5107 5108 /* Generate event to notify upper layer of bridge 5109 * change 5110 */ 5111 err = rtnl_bridge_notify(dev); 5112 } 5113 } 5114 5115 if (have_flags) 5116 memcpy(nla_data(attr), &flags, sizeof(flags)); 5117 out: 5118 return err; 5119 } 5120 5121 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 5122 struct netlink_ext_ack *extack) 5123 { 5124 struct net *net = sock_net(skb->sk); 5125 struct ifinfomsg *ifm; 5126 struct net_device *dev; 5127 struct nlattr *br_spec, *attr = NULL; 5128 int rem, err = -EOPNOTSUPP; 5129 u16 flags = 0; 5130 bool have_flags = false; 5131 5132 if (nlmsg_len(nlh) < sizeof(*ifm)) 5133 return -EINVAL; 5134 5135 ifm = nlmsg_data(nlh); 5136 if (ifm->ifi_family != AF_BRIDGE) 5137 return -EPFNOSUPPORT; 5138 5139 dev = __dev_get_by_index(net, ifm->ifi_index); 5140 if (!dev) { 5141 NL_SET_ERR_MSG(extack, "unknown ifindex"); 
5142 return -ENODEV; 5143 } 5144 5145 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 5146 if (br_spec) { 5147 nla_for_each_nested(attr, br_spec, rem) { 5148 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 5149 if (nla_len(attr) < sizeof(flags)) 5150 return -EINVAL; 5151 5152 have_flags = true; 5153 flags = nla_get_u16(attr); 5154 break; 5155 } 5156 } 5157 } 5158 5159 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { 5160 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 5161 5162 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) { 5163 err = -EOPNOTSUPP; 5164 goto out; 5165 } 5166 5167 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags); 5168 if (err) 5169 goto out; 5170 5171 flags &= ~BRIDGE_FLAGS_MASTER; 5172 } 5173 5174 if ((flags & BRIDGE_FLAGS_SELF)) { 5175 if (!dev->netdev_ops->ndo_bridge_dellink) 5176 err = -EOPNOTSUPP; 5177 else 5178 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh, 5179 flags); 5180 5181 if (!err) { 5182 flags &= ~BRIDGE_FLAGS_SELF; 5183 5184 /* Generate event to notify upper layer of bridge 5185 * change 5186 */ 5187 err = rtnl_bridge_notify(dev); 5188 } 5189 } 5190 5191 if (have_flags) 5192 memcpy(nla_data(attr), &flags, sizeof(flags)); 5193 out: 5194 return err; 5195 } 5196 5197 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr) 5198 { 5199 return (mask & IFLA_STATS_FILTER_BIT(attrid)) && 5200 (!idxattr || idxattr == attrid); 5201 } 5202 5203 static bool 5204 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id) 5205 { 5206 return dev->netdev_ops && 5207 dev->netdev_ops->ndo_has_offload_stats && 5208 dev->netdev_ops->ndo_get_offload_stats && 5209 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id); 5210 } 5211 5212 static unsigned int 5213 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id) 5214 { 5215 return rtnl_offload_xstats_have_ndo(dev, attr_id) ? 5216 sizeof(struct rtnl_link_stats64) : 0; 5217 } 5218 5219 static int 5220 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, 5221 struct sk_buff *skb) 5222 { 5223 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id); 5224 struct nlattr *attr = NULL; 5225 void *attr_data; 5226 int err; 5227 5228 if (!size) 5229 return -ENODATA; 5230 5231 attr = nla_reserve_64bit(skb, attr_id, size, 5232 IFLA_OFFLOAD_XSTATS_UNSPEC); 5233 if (!attr) 5234 return -EMSGSIZE; 5235 5236 attr_data = nla_data(attr); 5237 memset(attr_data, 0, size); 5238 5239 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data); 5240 if (err) 5241 return err; 5242 5243 return 0; 5244 } 5245 5246 static unsigned int 5247 rtnl_offload_xstats_get_size_stats(const struct net_device *dev, 5248 enum netdev_offload_xstats_type type) 5249 { 5250 bool enabled = netdev_offload_xstats_enabled(dev, type); 5251 5252 return enabled ? 
sizeof(struct rtnl_hw_stats64) : 0; 5253 } 5254 5255 struct rtnl_offload_xstats_request_used { 5256 bool request; 5257 bool used; 5258 }; 5259 5260 static int 5261 rtnl_offload_xstats_get_stats(struct net_device *dev, 5262 enum netdev_offload_xstats_type type, 5263 struct rtnl_offload_xstats_request_used *ru, 5264 struct rtnl_hw_stats64 *stats, 5265 struct netlink_ext_ack *extack) 5266 { 5267 bool request; 5268 bool used; 5269 int err; 5270 5271 request = netdev_offload_xstats_enabled(dev, type); 5272 if (!request) { 5273 used = false; 5274 goto out; 5275 } 5276 5277 err = netdev_offload_xstats_get(dev, type, stats, &used, extack); 5278 if (err) 5279 return err; 5280 5281 out: 5282 if (ru) { 5283 ru->request = request; 5284 ru->used = used; 5285 } 5286 return 0; 5287 } 5288 5289 static int 5290 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, 5291 struct rtnl_offload_xstats_request_used *ru) 5292 { 5293 struct nlattr *nest; 5294 5295 nest = nla_nest_start(skb, attr_id); 5296 if (!nest) 5297 return -EMSGSIZE; 5298 5299 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) 5300 goto nla_put_failure; 5301 5302 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) 5303 goto nla_put_failure; 5304 5305 nla_nest_end(skb, nest); 5306 return 0; 5307 5308 nla_put_failure: 5309 nla_nest_cancel(skb, nest); 5310 return -EMSGSIZE; 5311 } 5312 5313 static int 5314 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, 5315 struct netlink_ext_ack *extack) 5316 { 5317 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5318 struct rtnl_offload_xstats_request_used ru_l3; 5319 struct nlattr *nest; 5320 int err; 5321 5322 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); 5323 if (err) 5324 return err; 5325 5326 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5327 if (!nest) 5328 return -EMSGSIZE; 5329 5330 if (rtnl_offload_xstats_fill_hw_s_info_one(skb, 5331 IFLA_OFFLOAD_XSTATS_L3_STATS, 5332 &ru_l3)) 5333 goto nla_put_failure; 5334 5335 nla_nest_end(skb, nest); 5336 return 0; 5337 5338 nla_put_failure: 5339 nla_nest_cancel(skb, nest); 5340 return -EMSGSIZE; 5341 } 5342 5343 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, 5344 int *prividx, u32 off_filter_mask, 5345 struct netlink_ext_ack *extack) 5346 { 5347 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5348 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; 5349 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; 5350 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5351 bool have_data = false; 5352 int err; 5353 5354 if (*prividx <= attr_id_cpu_hit && 5355 (off_filter_mask & 5356 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { 5357 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); 5358 if (!err) { 5359 have_data = true; 5360 } else if (err != -ENODATA) { 5361 *prividx = attr_id_cpu_hit; 5362 return err; 5363 } 5364 } 5365 5366 if (*prividx <= attr_id_hw_s_info && 5367 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { 5368 *prividx = attr_id_hw_s_info; 5369 5370 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); 5371 if (err) 5372 return err; 5373 5374 have_data = true; 5375 *prividx = 0; 5376 } 5377 5378 if (*prividx <= attr_id_l3_stats && 5379 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { 5380 unsigned int size_l3; 5381 struct nlattr *attr; 5382 5383 *prividx = attr_id_l3_stats; 5384 5385 size_l3 = 
rtnl_offload_xstats_get_size_stats(dev, t_l3); 5386 if (!size_l3) 5387 goto skip_l3_stats; 5388 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, 5389 IFLA_OFFLOAD_XSTATS_UNSPEC); 5390 if (!attr) 5391 return -EMSGSIZE; 5392 5393 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, 5394 nla_data(attr), extack); 5395 if (err) 5396 return err; 5397 5398 have_data = true; 5399 skip_l3_stats: 5400 *prividx = 0; 5401 } 5402 5403 if (!have_data) 5404 return -ENODATA; 5405 5406 *prividx = 0; 5407 return 0; 5408 } 5409 5410 static unsigned int 5411 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, 5412 enum netdev_offload_xstats_type type) 5413 { 5414 bool enabled = netdev_offload_xstats_enabled(dev, type); 5415 5416 return nla_total_size(0) + 5417 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ 5418 nla_total_size(sizeof(u8)) + 5419 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ 5420 (enabled ? nla_total_size(sizeof(u8)) : 0) + 5421 0; 5422 } 5423 5424 static unsigned int 5425 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) 5426 { 5427 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5428 5429 return nla_total_size(0) + 5430 /* IFLA_OFFLOAD_XSTATS_L3_STATS */ 5431 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + 5432 0; 5433 } 5434 5435 static int rtnl_offload_xstats_get_size(const struct net_device *dev, 5436 u32 off_filter_mask) 5437 { 5438 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5439 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; 5440 int nla_size = 0; 5441 int size; 5442 5443 if (off_filter_mask & 5444 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { 5445 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); 5446 nla_size += nla_total_size_64bit(size); 5447 } 5448 5449 if (off_filter_mask & 5450 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) 5451 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); 5452 5453 if (off_filter_mask & 5454 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { 5455 size = rtnl_offload_xstats_get_size_stats(dev, t_l3); 5456 nla_size += nla_total_size_64bit(size); 5457 } 5458 5459 if (nla_size != 0) 5460 nla_size += nla_total_size(0); 5461 5462 return nla_size; 5463 } 5464 5465 struct rtnl_stats_dump_filters { 5466 /* mask[0] filters outer attributes. Then individual nests have their 5467 * filtering mask at the index of the nested attribute. 
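 * For example, a request limited to L3 offload stats would carry
 * mask[0] == IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS) and
 * mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] ==
 * IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS).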
5468 */ 5469 u32 mask[IFLA_STATS_MAX + 1]; 5470 }; 5471 5472 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, 5473 int type, u32 pid, u32 seq, u32 change, 5474 unsigned int flags, 5475 const struct rtnl_stats_dump_filters *filters, 5476 int *idxattr, int *prividx, 5477 struct netlink_ext_ack *extack) 5478 { 5479 unsigned int filter_mask = filters->mask[0]; 5480 struct if_stats_msg *ifsm; 5481 struct nlmsghdr *nlh; 5482 struct nlattr *attr; 5483 int s_prividx = *prividx; 5484 int err; 5485 5486 ASSERT_RTNL(); 5487 5488 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags); 5489 if (!nlh) 5490 return -EMSGSIZE; 5491 5492 ifsm = nlmsg_data(nlh); 5493 ifsm->family = PF_UNSPEC; 5494 ifsm->pad1 = 0; 5495 ifsm->pad2 = 0; 5496 ifsm->ifindex = dev->ifindex; 5497 ifsm->filter_mask = filter_mask; 5498 5499 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) { 5500 struct rtnl_link_stats64 *sp; 5501 5502 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64, 5503 sizeof(struct rtnl_link_stats64), 5504 IFLA_STATS_UNSPEC); 5505 if (!attr) { 5506 err = -EMSGSIZE; 5507 goto nla_put_failure; 5508 } 5509 5510 sp = nla_data(attr); 5511 dev_get_stats(dev, sp); 5512 } 5513 5514 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) { 5515 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5516 5517 if (ops && ops->fill_linkxstats) { 5518 *idxattr = IFLA_STATS_LINK_XSTATS; 5519 attr = nla_nest_start_noflag(skb, 5520 IFLA_STATS_LINK_XSTATS); 5521 if (!attr) { 5522 err = -EMSGSIZE; 5523 goto nla_put_failure; 5524 } 5525 5526 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5527 nla_nest_end(skb, attr); 5528 if (err) 5529 goto nla_put_failure; 5530 *idxattr = 0; 5531 } 5532 } 5533 5534 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 5535 *idxattr)) { 5536 const struct rtnl_link_ops *ops = NULL; 5537 const struct net_device *master; 5538 5539 master = netdev_master_upper_dev_get(dev); 5540 if (master) 5541 ops = master->rtnl_link_ops; 5542 if (ops && ops->fill_linkxstats) { 5543 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE; 5544 attr = nla_nest_start_noflag(skb, 5545 IFLA_STATS_LINK_XSTATS_SLAVE); 5546 if (!attr) { 5547 err = -EMSGSIZE; 5548 goto nla_put_failure; 5549 } 5550 5551 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr); 5552 nla_nest_end(skb, attr); 5553 if (err) 5554 goto nla_put_failure; 5555 *idxattr = 0; 5556 } 5557 } 5558 5559 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 5560 *idxattr)) { 5561 u32 off_filter_mask; 5562 5563 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5564 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; 5565 attr = nla_nest_start_noflag(skb, 5566 IFLA_STATS_LINK_OFFLOAD_XSTATS); 5567 if (!attr) { 5568 err = -EMSGSIZE; 5569 goto nla_put_failure; 5570 } 5571 5572 err = rtnl_offload_xstats_fill(skb, dev, prividx, 5573 off_filter_mask, extack); 5574 if (err == -ENODATA) 5575 nla_nest_cancel(skb, attr); 5576 else 5577 nla_nest_end(skb, attr); 5578 5579 if (err && err != -ENODATA) 5580 goto nla_put_failure; 5581 *idxattr = 0; 5582 } 5583 5584 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) { 5585 struct rtnl_af_ops *af_ops; 5586 5587 *idxattr = IFLA_STATS_AF_SPEC; 5588 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC); 5589 if (!attr) { 5590 err = -EMSGSIZE; 5591 goto nla_put_failure; 5592 } 5593 5594 rcu_read_lock(); 5595 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5596 if (af_ops->fill_stats_af) { 5597 struct nlattr *af; 5598 5599 af = 
nla_nest_start_noflag(skb, 5600 af_ops->family); 5601 if (!af) { 5602 rcu_read_unlock(); 5603 err = -EMSGSIZE; 5604 goto nla_put_failure; 5605 } 5606 err = af_ops->fill_stats_af(skb, dev); 5607 5608 if (err == -ENODATA) { 5609 nla_nest_cancel(skb, af); 5610 } else if (err < 0) { 5611 rcu_read_unlock(); 5612 goto nla_put_failure; 5613 } 5614 5615 nla_nest_end(skb, af); 5616 } 5617 } 5618 rcu_read_unlock(); 5619 5620 nla_nest_end(skb, attr); 5621 5622 *idxattr = 0; 5623 } 5624 5625 nlmsg_end(skb, nlh); 5626 5627 return 0; 5628 5629 nla_put_failure: 5630 /* not a multi message or no progress mean a real error */ 5631 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx) 5632 nlmsg_cancel(skb, nlh); 5633 else 5634 nlmsg_end(skb, nlh); 5635 5636 return err; 5637 } 5638 5639 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5640 const struct rtnl_stats_dump_filters *filters) 5641 { 5642 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5643 unsigned int filter_mask = filters->mask[0]; 5644 5645 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5646 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); 5647 5648 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 5649 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 5650 int attr = IFLA_STATS_LINK_XSTATS; 5651 5652 if (ops && ops->get_linkxstats_size) { 5653 size += nla_total_size(ops->get_linkxstats_size(dev, 5654 attr)); 5655 /* for IFLA_STATS_LINK_XSTATS */ 5656 size += nla_total_size(0); 5657 } 5658 } 5659 5660 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) { 5661 struct net_device *_dev = (struct net_device *)dev; 5662 const struct rtnl_link_ops *ops = NULL; 5663 const struct net_device *master; 5664 5665 /* netdev_master_upper_dev_get can't take const */ 5666 master = netdev_master_upper_dev_get(_dev); 5667 if (master) 5668 ops = master->rtnl_link_ops; 5669 if (ops && ops->get_linkxstats_size) { 5670 int attr = IFLA_STATS_LINK_XSTATS_SLAVE; 5671 5672 size += nla_total_size(ops->get_linkxstats_size(dev, 5673 attr)); 5674 /* for IFLA_STATS_LINK_XSTATS_SLAVE */ 5675 size += nla_total_size(0); 5676 } 5677 } 5678 5679 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { 5680 u32 off_filter_mask; 5681 5682 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; 5683 size += rtnl_offload_xstats_get_size(dev, off_filter_mask); 5684 } 5685 5686 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { 5687 struct rtnl_af_ops *af_ops; 5688 5689 /* for IFLA_STATS_AF_SPEC */ 5690 size += nla_total_size(0); 5691 5692 rcu_read_lock(); 5693 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) { 5694 if (af_ops->get_stats_af_size) { 5695 size += nla_total_size( 5696 af_ops->get_stats_af_size(dev)); 5697 5698 /* for AF_* */ 5699 size += nla_total_size(0); 5700 } 5701 } 5702 rcu_read_unlock(); 5703 } 5704 5705 return size; 5706 } 5707 5708 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) 5709 5710 static const struct nla_policy 5711 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { 5712 [IFLA_STATS_LINK_OFFLOAD_XSTATS] = 5713 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), 5714 }; 5715 5716 static const struct nla_policy 5717 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { 5718 [IFLA_STATS_GET_FILTERS] = 5719 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), 5720 }; 5721 5722 static const struct nla_policy 5723 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { 5724 
[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), 5725 }; 5726 5727 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, 5728 struct rtnl_stats_dump_filters *filters, 5729 struct netlink_ext_ack *extack) 5730 { 5731 struct nlattr *tb[IFLA_STATS_MAX + 1]; 5732 int err; 5733 int at; 5734 5735 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, 5736 rtnl_stats_get_policy_filters, extack); 5737 if (err < 0) 5738 return err; 5739 5740 for (at = 1; at <= IFLA_STATS_MAX; at++) { 5741 if (tb[at]) { 5742 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { 5743 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); 5744 return -EINVAL; 5745 } 5746 filters->mask[at] = nla_get_u32(tb[at]); 5747 } 5748 } 5749 5750 return 0; 5751 } 5752 5753 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, 5754 u32 filter_mask, 5755 struct rtnl_stats_dump_filters *filters, 5756 struct netlink_ext_ack *extack) 5757 { 5758 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5759 int err; 5760 int i; 5761 5762 filters->mask[0] = filter_mask; 5763 for (i = 1; i < ARRAY_SIZE(filters->mask); i++) 5764 filters->mask[i] = -1U; 5765 5766 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, 5767 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack); 5768 if (err < 0) 5769 return err; 5770 5771 if (tb[IFLA_STATS_GET_FILTERS]) { 5772 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], 5773 filters, extack); 5774 if (err) 5775 return err; 5776 } 5777 5778 return 0; 5779 } 5780 5781 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, 5782 bool is_dump, struct netlink_ext_ack *extack) 5783 { 5784 struct if_stats_msg *ifsm; 5785 5786 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { 5787 NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); 5788 return -EINVAL; 5789 } 5790 5791 if (!strict_check) 5792 return 0; 5793 5794 ifsm = nlmsg_data(nlh); 5795 5796 /* only requests using strict checks can pass data to influence 5797 * the dump. The legacy exception is filter_mask. 
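 * Strict requests must therefore zero the padding, and a dump request
 * must also leave ifindex at 0, since a stats dump walks all devices
 * regardless.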
5798 */ 5799 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) { 5800 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); 5801 return -EINVAL; 5802 } 5803 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { 5804 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); 5805 return -EINVAL; 5806 } 5807 5808 return 0; 5809 } 5810 5811 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, 5812 struct netlink_ext_ack *extack) 5813 { 5814 struct rtnl_stats_dump_filters filters; 5815 struct net *net = sock_net(skb->sk); 5816 struct net_device *dev = NULL; 5817 int idxattr = 0, prividx = 0; 5818 struct if_stats_msg *ifsm; 5819 struct sk_buff *nskb; 5820 int err; 5821 5822 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5823 false, extack); 5824 if (err) 5825 return err; 5826 5827 ifsm = nlmsg_data(nlh); 5828 if (ifsm->ifindex > 0) 5829 dev = __dev_get_by_index(net, ifsm->ifindex); 5830 else 5831 return -EINVAL; 5832 5833 if (!dev) 5834 return -ENODEV; 5835 5836 if (!ifsm->filter_mask) { 5837 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); 5838 return -EINVAL; 5839 } 5840 5841 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); 5842 if (err) 5843 return err; 5844 5845 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); 5846 if (!nskb) 5847 return -ENOBUFS; 5848 5849 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, 5850 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 5851 0, &filters, &idxattr, &prividx, extack); 5852 if (err < 0) { 5853 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ 5854 WARN_ON(err == -EMSGSIZE); 5855 kfree_skb(nskb); 5856 } else { 5857 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); 5858 } 5859 5860 return err; 5861 } 5862 5863 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) 5864 { 5865 struct netlink_ext_ack *extack = cb->extack; 5866 int h, s_h, err, s_idx, s_idxattr, s_prividx; 5867 struct rtnl_stats_dump_filters filters; 5868 struct net *net = sock_net(skb->sk); 5869 unsigned int flags = NLM_F_MULTI; 5870 struct if_stats_msg *ifsm; 5871 struct hlist_head *head; 5872 struct net_device *dev; 5873 int idx = 0; 5874 5875 s_h = cb->args[0]; 5876 s_idx = cb->args[1]; 5877 s_idxattr = cb->args[2]; 5878 s_prividx = cb->args[3]; 5879 5880 cb->seq = net->dev_base_seq; 5881 5882 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack); 5883 if (err) 5884 return err; 5885 5886 ifsm = nlmsg_data(cb->nlh); 5887 if (!ifsm->filter_mask) { 5888 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); 5889 return -EINVAL; 5890 } 5891 5892 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, 5893 extack); 5894 if (err) 5895 return err; 5896 5897 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 5898 idx = 0; 5899 head = &net->dev_index_head[h]; 5900 hlist_for_each_entry(dev, head, index_hlist) { 5901 if (idx < s_idx) 5902 goto cont; 5903 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 5904 NETLINK_CB(cb->skb).portid, 5905 cb->nlh->nlmsg_seq, 0, 5906 flags, &filters, 5907 &s_idxattr, &s_prividx, 5908 extack); 5909 /* If we ran out of room on the first message, 5910 * we're in trouble 5911 */ 5912 WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); 5913 5914 if (err < 0) 5915 goto out; 5916 s_prividx = 0; 5917 s_idxattr = 0; 5918 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5919 cont: 5920 idx++; 5921 } 5922 } 5923 out: 5924 cb->args[3] = s_prividx; 5925 cb->args[2] = s_idxattr; 
5926 cb->args[1] = idx; 5927 cb->args[0] = h; 5928 5929 return skb->len; 5930 } 5931 5932 void rtnl_offload_xstats_notify(struct net_device *dev) 5933 { 5934 struct rtnl_stats_dump_filters response_filters = {}; 5935 struct net *net = dev_net(dev); 5936 int idxattr = 0, prividx = 0; 5937 struct sk_buff *skb; 5938 int err = -ENOBUFS; 5939 5940 ASSERT_RTNL(); 5941 5942 response_filters.mask[0] |= 5943 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 5944 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 5945 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 5946 5947 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), 5948 GFP_KERNEL); 5949 if (!skb) 5950 goto errout; 5951 5952 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, 5953 &response_filters, &idxattr, &prividx, NULL); 5954 if (err < 0) { 5955 kfree_skb(skb); 5956 goto errout; 5957 } 5958 5959 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); 5960 return; 5961 5962 errout: 5963 rtnl_set_sk_err(net, RTNLGRP_STATS, err); 5964 } 5965 EXPORT_SYMBOL(rtnl_offload_xstats_notify); 5966 5967 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, 5968 struct netlink_ext_ack *extack) 5969 { 5970 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; 5971 struct rtnl_stats_dump_filters response_filters = {}; 5972 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; 5973 struct net *net = sock_net(skb->sk); 5974 struct net_device *dev = NULL; 5975 struct if_stats_msg *ifsm; 5976 bool notify = false; 5977 int err; 5978 5979 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), 5980 false, extack); 5981 if (err) 5982 return err; 5983 5984 ifsm = nlmsg_data(nlh); 5985 if (ifsm->family != AF_UNSPEC) { 5986 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); 5987 return -EINVAL; 5988 } 5989 5990 if (ifsm->ifindex > 0) 5991 dev = __dev_get_by_index(net, ifsm->ifindex); 5992 else 5993 return -EINVAL; 5994 5995 if (!dev) 5996 return -ENODEV; 5997 5998 if (ifsm->filter_mask) { 5999 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); 6000 return -EINVAL; 6001 } 6002 6003 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, 6004 ifla_stats_set_policy, extack); 6005 if (err < 0) 6006 return err; 6007 6008 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { 6009 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); 6010 6011 if (req) 6012 err = netdev_offload_xstats_enable(dev, t_l3, extack); 6013 else 6014 err = netdev_offload_xstats_disable(dev, t_l3); 6015 6016 if (!err) 6017 notify = true; 6018 else if (err != -EALREADY) 6019 return err; 6020 6021 response_filters.mask[0] |= 6022 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); 6023 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= 6024 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 6025 } 6026 6027 if (notify) 6028 rtnl_offload_xstats_notify(dev); 6029 6030 return 0; 6031 } 6032 6033 /* Process one rtnetlink message. 
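 *
 * The handler is looked up via rtnl_get_link() by (family, message type),
 * falling back to PF_UNSPEC. GET requests carrying NLM_F_DUMP are handed
 * to netlink_dump_start(); all other kinds require CAP_NET_ADMIN, and the
 * doit handler runs under the RTNL unless it was registered with
 * RTNL_FLAG_DOIT_UNLOCKED.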
*/ 6034 6035 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 6036 struct netlink_ext_ack *extack) 6037 { 6038 struct net *net = sock_net(skb->sk); 6039 struct rtnl_link *link; 6040 enum rtnl_kinds kind; 6041 struct module *owner; 6042 int err = -EOPNOTSUPP; 6043 rtnl_doit_func doit; 6044 unsigned int flags; 6045 int family; 6046 int type; 6047 6048 type = nlh->nlmsg_type; 6049 if (type > RTM_MAX) 6050 return -EOPNOTSUPP; 6051 6052 type -= RTM_BASE; 6053 6054 /* All the messages must have at least 1 byte length */ 6055 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) 6056 return 0; 6057 6058 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 6059 kind = rtnl_msgtype_kind(type); 6060 6061 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN)) 6062 return -EPERM; 6063 6064 rcu_read_lock(); 6065 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) { 6066 struct sock *rtnl; 6067 rtnl_dumpit_func dumpit; 6068 u32 min_dump_alloc = 0; 6069 6070 link = rtnl_get_link(family, type); 6071 if (!link || !link->dumpit) { 6072 family = PF_UNSPEC; 6073 link = rtnl_get_link(family, type); 6074 if (!link || !link->dumpit) 6075 goto err_unlock; 6076 } 6077 owner = link->owner; 6078 dumpit = link->dumpit; 6079 6080 if (type == RTM_GETLINK - RTM_BASE) 6081 min_dump_alloc = rtnl_calcit(skb, nlh); 6082 6083 err = 0; 6084 /* need to do this before rcu_read_unlock() */ 6085 if (!try_module_get(owner)) 6086 err = -EPROTONOSUPPORT; 6087 6088 rcu_read_unlock(); 6089 6090 rtnl = net->rtnl; 6091 if (err == 0) { 6092 struct netlink_dump_control c = { 6093 .dump = dumpit, 6094 .min_dump_alloc = min_dump_alloc, 6095 .module = owner, 6096 }; 6097 err = netlink_dump_start(rtnl, skb, nlh, &c); 6098 /* netlink_dump_start() will keep a reference on 6099 * module if dump is still in progress. 
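 * The module_put() below therefore only drops the reference taken with
 * try_module_get() above; a dump still in flight keeps the module
 * pinned until it completes.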
6100 */ 6101 module_put(owner); 6102 } 6103 return err; 6104 } 6105 6106 link = rtnl_get_link(family, type); 6107 if (!link || !link->doit) { 6108 family = PF_UNSPEC; 6109 link = rtnl_get_link(PF_UNSPEC, type); 6110 if (!link || !link->doit) 6111 goto out_unlock; 6112 } 6113 6114 owner = link->owner; 6115 if (!try_module_get(owner)) { 6116 err = -EPROTONOSUPPORT; 6117 goto out_unlock; 6118 } 6119 6120 flags = link->flags; 6121 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && 6122 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { 6123 NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); 6124 module_put(owner); 6125 goto err_unlock; 6126 } 6127 6128 if (flags & RTNL_FLAG_DOIT_UNLOCKED) { 6129 doit = link->doit; 6130 rcu_read_unlock(); 6131 if (doit) 6132 err = doit(skb, nlh, extack); 6133 module_put(owner); 6134 return err; 6135 } 6136 rcu_read_unlock(); 6137 6138 rtnl_lock(); 6139 link = rtnl_get_link(family, type); 6140 if (link && link->doit) 6141 err = link->doit(skb, nlh, extack); 6142 rtnl_unlock(); 6143 6144 module_put(owner); 6145 6146 return err; 6147 6148 out_unlock: 6149 rcu_read_unlock(); 6150 return err; 6151 6152 err_unlock: 6153 rcu_read_unlock(); 6154 return -EOPNOTSUPP; 6155 } 6156 6157 static void rtnetlink_rcv(struct sk_buff *skb) 6158 { 6159 netlink_rcv_skb(skb, &rtnetlink_rcv_msg); 6160 } 6161 6162 static int rtnetlink_bind(struct net *net, int group) 6163 { 6164 switch (group) { 6165 case RTNLGRP_IPV4_MROUTE_R: 6166 case RTNLGRP_IPV6_MROUTE_R: 6167 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 6168 return -EPERM; 6169 break; 6170 } 6171 return 0; 6172 } 6173 6174 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) 6175 { 6176 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6177 6178 switch (event) { 6179 case NETDEV_REBOOT: 6180 case NETDEV_CHANGEMTU: 6181 case NETDEV_CHANGEADDR: 6182 case NETDEV_CHANGENAME: 6183 case NETDEV_FEAT_CHANGE: 6184 case NETDEV_BONDING_FAILOVER: 6185 case NETDEV_POST_TYPE_CHANGE: 6186 case NETDEV_NOTIFY_PEERS: 6187 case NETDEV_CHANGEUPPER: 6188 case NETDEV_RESEND_IGMP: 6189 case NETDEV_CHANGEINFODATA: 6190 case NETDEV_CHANGELOWERSTATE: 6191 case NETDEV_CHANGE_TX_QUEUE_LEN: 6192 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 6193 GFP_KERNEL, NULL, 0, 0, NULL); 6194 break; 6195 default: 6196 break; 6197 } 6198 return NOTIFY_DONE; 6199 } 6200 6201 static struct notifier_block rtnetlink_dev_notifier = { 6202 .notifier_call = rtnetlink_event, 6203 }; 6204 6205 6206 static int __net_init rtnetlink_net_init(struct net *net) 6207 { 6208 struct sock *sk; 6209 struct netlink_kernel_cfg cfg = { 6210 .groups = RTNLGRP_MAX, 6211 .input = rtnetlink_rcv, 6212 .cb_mutex = &rtnl_mutex, 6213 .flags = NL_CFG_F_NONROOT_RECV, 6214 .bind = rtnetlink_bind, 6215 }; 6216 6217 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg); 6218 if (!sk) 6219 return -ENOMEM; 6220 net->rtnl = sk; 6221 return 0; 6222 } 6223 6224 static void __net_exit rtnetlink_net_exit(struct net *net) 6225 { 6226 netlink_kernel_release(net->rtnl); 6227 net->rtnl = NULL; 6228 } 6229 6230 static struct pernet_operations rtnetlink_net_ops = { 6231 .init = rtnetlink_net_init, 6232 .exit = rtnetlink_net_exit, 6233 }; 6234 6235 void __init rtnetlink_init(void) 6236 { 6237 if (register_pernet_subsys(&rtnetlink_net_ops)) 6238 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 6239 6240 register_netdevice_notifier(&rtnetlink_dev_notifier); 6241 6242 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 6243 rtnl_dump_ifinfo, 0); 
6244 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); 6245 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); 6246 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); 6247 6248 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); 6249 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); 6250 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); 6251 6252 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0); 6253 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0); 6254 6255 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); 6256 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 6257 RTNL_FLAG_BULK_DEL_SUPPORTED); 6258 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0); 6259 6260 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); 6261 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); 6262 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); 6263 6264 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 6265 0); 6266 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0); 6267 } 6268
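
/* For reference: the RTM_GETSTATS/RTM_SETSTATS handlers registered above
 * are what recent iproute2 drives with commands along the lines of
 * "ip stats show dev eth0 group offload subgroup l3_stats" and
 * "ip stats set dev eth0 l3_stats on" (syntax quoted from iproute2 and
 * not verified against any particular version).
 */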