/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
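
#if 0
/* Illustrative sketch, not part of the original file: a caller that
 * already holds the RTNL mutex can queue a chain of skbs with
 * rtnl_kfree_skbs() instead of freeing them inline; __rtnl_unlock()
 * below frees the deferred list once the mutex has been dropped.
 * example_defer_free() and its skb chain are hypothetical.
 */
static void example_defer_free(struct sk_buff *head, struct sk_buff *tail)
{
	ASSERT_RTNL();
	rtnl_kfree_skbs(head, tail);	/* actually freed in __rtnl_unlock() */
}
#endif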

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return tab[msgtype];
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = tab[msgindex];
	rcu_assign_pointer(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = tab[msgindex];
		if (!link)
			continue;

		rcu_assign_pointer(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
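
#if 0
/* Illustrative sketch, not part of the original file: how a module
 * might register and later unregister a doit handler with
 * rtnl_register_module().  example_doit() is hypothetical and the
 * PF_UNSPEC/RTM_GETLINK pair stands in for a real protocol:msgtype.
 */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	return 0;
}

static int __init example_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETLINK,
				    example_doit, NULL, 0);
}

static void __exit example_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETLINK);
}
#endif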

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well; that disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
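
#if 0
/* Illustrative sketch, not part of the original file: a minimal
 * rtnl_link_ops registration of the kind virtual-device drivers
 * (dummy, veth, ...) perform.  Because .setup is set, rtnetlink can
 * create devices of this kind, and __rtnl_link_register() above fills
 * in .dellink automatically.  "example" and example_setup() are
 * hypothetical.
 */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
}

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind	= "example",
	.setup	= example_setup,
};

static int __init example_link_init(void)
{
	return rtnl_link_register(&example_link_ops);
}
#endif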

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
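
#if 0
/* Illustrative sketch, not part of the original file: a per-address-
 * family handler as registered by protocols such as IPv4/IPv6.  A
 * fill_link_af() callback may return -ENODATA when it has nothing to
 * dump; rtnl_fill_link_af() below trims the nest in that case.
 * example_fill_link_af() and this registration are hypothetical.
 */
static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask)
{
	return -ENODATA;	/* nothing to dump for this device */
}

static struct rtnl_af_ops example_af_ops __read_mostly = {
	.family		= AF_INET6,
	.fill_link_af	= example_fill_link_af,
};
#endif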

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
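
#if 0
/* Illustrative sketch, not part of the original file: filling
 * RTA_METRICS for a route message.  The metrics array is indexed by
 * RTAX_* - 1, which is the layout rtnetlink_put_metrics() expects;
 * the values are hypothetical.
 */
static int example_put_metrics(struct sk_buff *skb)
{
	u32 metrics[RTAX_MAX] = {};

	metrics[RTAX_MTU - 1] = 1500;
	metrics[RTAX_HOPLIMIT - 1] = 64;
	return rtnetlink_put_metrics(skb, metrics);
}
#endif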

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id = id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4);	/* XDP_PROG_ID */

	return xdp_size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + nla_total_size(1)  /* IFLA_PROTO_DOWN */
	       + nla_total_size(4)  /* IFLA_IF_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	err = switchdev_port_attr_get(dev, &attr);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct bpf_prog *generic_xdp_prog;
	struct netdev_bpf xdp;

	ASSERT_RTNL();

	*prog_id = 0;
	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (generic_xdp_prog) {
		*prog_id = generic_xdp_prog->aux->id;
		return XDP_ATTACHED_SKB;
	}
	if (!ops->ndo_bpf)
		return XDP_ATTACHED_NONE;

	__dev_xdp_query(dev, ops->ndo_bpf, &xdp);
	*prog_id = xdp.prog_id;

	return xdp.prog_attached;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;

	xdp = nla_nest_start(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
			 rtnl_xdp_attached_mode(dev, &prog_id));
	if (err)
		goto err_cancel;

	if (prog_id) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}
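
#if 0
/* Illustrative sketch, not part of the original file: the
 * nest-start/nest-end pattern used by rtnl_xdp_fill() above and by
 * most fill helpers in this file.  On failure the partially written
 * nest must be trimmed with nla_nest_cancel().
 */
static int example_fill_nest(struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, IFLA_XDP);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u8(skb, IFLA_XDP_ATTACHED, XDP_ATTACHED_NONE)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);
	return 0;
}
#endif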

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
{
	int ifindex = dev_get_iflink(dev);

	if (dev->ifindex == ifindex)
		return 0;

	return nla_put_u32(skb, IFLA_LINK, ifindex);
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net)
{
	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;
		}
	}

	return 0;
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    nla_put_iflink(skb, dev) ||
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_IF_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};
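
#if 0
/* Illustrative sketch, not part of the original file: validating a
 * nested IFLA_LINKINFO attribute against ifla_info_policy, the same
 * pattern linkinfo_to_kind_ops() uses below.
 */
static int example_parse_linkinfo(const struct nlattr *nla)
{
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	return nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
				ifla_info_policy, NULL);
}
#endif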

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};

static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
			     ifla_info_policy, NULL) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}

static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}

static struct net *get_target_net(struct sock *sk, int netnsid)
{
	struct net *net;

	net = get_net_ns_by_id(sock_net(sk), netnsid);
	if (!net)
		return ERR_PTR(-EINVAL);

	/* For now, the caller is required to have CAP_NET_ADMIN in
	 * the user namespace owning the target net ns.
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int netnsid = -1;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_IF_NETNSID]) {
			netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
			tgt_net = get_target_net(skb->sk, netnsid);
			if (IS_ERR(tgt_net)) {
				tgt_net = net;
				netnsid = -1;
			}
		}

		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &tgt_net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, net,
					       RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask, 0, NULL, 0,
					       netnsid);

			if (err < 0) {
				if (likely(skb->len))
					goto out;

				goto out_err;
			}
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	cb->args[1] = idx;
	cb->args[0] = h;
	cb->seq = net->dev_base_seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

/* Figure out which network namespace we are talking about by
 * examining the link attributes in the following order:
 *
 * 1. IFLA_NET_NS_PID
 * 2. IFLA_NET_NS_FD
 * 3. IFLA_IF_NETNSID
 */
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
					       struct nlattr *tb[])
{
	struct net *net;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
		return rtnl_link_get_net(src_net, tb);

	if (!tb[IFLA_IF_NETNSID])
		return get_net(src_net);

	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
	if (!net)
		return ERR_PTR(-EINVAL);

	return net;
}

static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
					     struct net *src_net,
					     struct nlattr *tb[], int cap)
{
	struct net *net;

	net = rtnl_link_get_net_by_nlattr(src_net, tb);
	if (IS_ERR(net))
		return net;

	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
		put_net(net);
		return ERR_PTR(-EPERM);
	}

	return net;
}
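
#if 0
/* Illustrative sketch, not part of the original file: resolving the
 * target namespace from parsed link attributes with
 * rtnl_link_get_net() and dropping the reference when done.  tb[] is
 * assumed to have been filled by nlmsg_parse() with ifla_policy.
 */
static int example_with_target_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net = rtnl_link_get_net(src_net, tb);

	if (IS_ERR(net))
		return PTR_ERR(net);
	/* ... operate on the target namespace ... */
	put_net(net);
	return 0;
}
#endif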

/* Verify that rtnetlink requests do not pass additional properties
 * potentially referring to different network namespaces.
 */
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
				    struct netlink_ext_ack *extack,
				    bool netns_id_only)
{

	if (netns_id_only) {
		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
			return 0;

		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
		return -EOPNOTSUPP;
	}

	if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
		goto invalid_attr;

	return 0;

invalid_attr:
	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
	return -EINVAL;
}

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			rcu_read_lock();
			af_ops = rtnl_af_lookup(nla_type(af));
			if (!af_ops) {
				rcu_read_unlock();
				return -EAFNOSUPPORT;
			}

			if (!af_ops->set_link_af) {
				rcu_read_unlock();
				return -EOPNOTSUPP;
			}

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af);
				if (err < 0) {
					rcu_read_unlock();
					return err;
				}
			}

			rcu_read_unlock();
		}
	}

	return 0;
}

static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}

static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}

static int do_set_master(struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev, extack);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
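
/*
 * Illustrative note (not part of this file): do_set_master() backs the
 * IFLA_MASTER attribute, so "ip link set dev eth0 master br0" first calls
 * the current master's ndo_del_slave (if any) and then br0's
 * ndo_add_slave; IFLA_MASTER == 0 ("ip link set dev eth0 nomaster")
 * detaches the device from its master.
 */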

#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, char *ifname, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
		struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
							    tb, CAP_NET_ADMIN);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}

		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * An interface selected by interface index with an interface
	 * name also provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);

		err = dev_change_tx_queue_len(dev, value);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GSO_MAX_SIZE]) {
		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);

		if (max_size > GSO_MAX_SIZE) {
			err = -EINVAL;
			goto errout;
		}

		if (dev->gso_max_size ^ max_size) {
			netif_set_gso_max_size(dev, max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GSO_MAX_SEGS]) {
		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);

		if (max_segs > GSO_MAX_SEGS) {
			err = -EINVAL;
			goto errout;
		}

		if (dev->gso_max_segs ^ max_segs) {
			dev->gso_max_segs = max_segs;
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		write_lock_bh(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock_bh(&dev_base_lock);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
					       ifla_vf_policy, NULL);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
					       ifla_port_policy, NULL);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
				       tb[IFLA_PORT_SELF], ifla_port_policy,
				       NULL);
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			rcu_read_lock();

			/* validate_linkmsg() already checked the af_ops
			 * exist, so a failed lookup here is a kernel bug.
			 */
			af_ops = rtnl_af_lookup(nla_type(af));
			BUG_ON(!af_ops);

			err = af_ops->set_link_af(dev, af);
			if (err < 0) {
				rcu_read_unlock();
				goto errout;
			}

			rcu_read_unlock();
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
		u32 xdp_flags = 0;

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
				       ifla_xdp_policy, NULL);
		if (err < 0)
			goto errout;

		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
			err = -EINVAL;
			goto errout;
		}

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
				err = -EINVAL;
				goto errout;
			}
		}

		if (xdp[IFLA_XDP_FD]) {
			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
						xdp_flags);
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}
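
/*
 * Illustrative sketch (not part of this file): a minimal RTM_SETLINK
 * request that only changes the MTU of ifindex 2, as do_setlink() above
 * would process it. Attribute alignment is glossed over; real callers
 * should use the nla_*() helpers or libnl.
 *
 *	struct {
 *		struct nlmsghdr  nlh;
 *		struct ifinfomsg ifm;
 *		struct nlattr    nla;	// IFLA_MTU
 *		__u32            mtu;
 *	} req = {
 *		.nlh.nlmsg_len   = sizeof(req),
 *		.nlh.nlmsg_type  = RTM_SETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *		.ifm.ifi_index   = 2,
 *		.nla.nla_type    = IFLA_MTU,
 *		.nla.nla_len     = NLA_HDRLEN + sizeof(__u32),
 *		.mtu             = 1400,
 *	};
 */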

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
errout:
	return err;
}

static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}

int rtnl_delete_link(struct net_device *dev)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int err;
	int netnsid = -1;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_IF_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(tgt_net, ifname);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;

		goto out;
	}

	err = rtnl_delete_link(dev);

out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			return err;
	}

	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;

	__dev_notify_flags(dev, old_flags, ~0U);
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);

struct net_device *rtnl_create_link(struct net *net,
	const char *ifname, unsigned char name_assign_type,
	const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
			       ops->setup, num_tx_queues, num_rx_queues);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS]) {
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
	if (tb[IFLA_GSO_MAX_SIZE])
		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
	if (tb[IFLA_GSO_MAX_SEGS])
		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);

static int rtnl_group_changelink(const struct sk_buff *skb,
		struct net *net, int group,
		struct ifinfomsg *ifm,
		struct netlink_ext_ack *extack,
		struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
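
/*
 * Illustrative sketch (not part of this file): rtnl_create_link() above is
 * also usable from a driver's ->newlink() to allocate a second device, the
 * way a veth-style driver creates its peer. A minimal sketch, assuming a
 * hypothetical "my_link_ops" with ->setup and ->priv_size set:
 *
 *	struct net_device *peer;
 *	int err;
 *
 *	peer = rtnl_create_link(net, "peer%d", NET_NAME_ENUM,
 *				&my_link_ops, tb);
 *	if (IS_ERR(peer))
 *		return PTR_ERR(peer);
 *	err = register_netdevice(peer);
 *	if (err < 0) {
 *		free_netdev(peer);
 *		return err;
 *	}
 */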

static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	const struct rtnl_link_ops *m_ops = NULL;
	struct net_device *dev;
	struct net_device *master_dev = NULL;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	unsigned char name_assign_type = NET_NAME_USER;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else {
		if (ifname[0])
			dev = __dev_get_by_name(net, ifname);
		else
			dev = NULL;
	}

	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
				       tb[IFLA_LINKINFO], ifla_info_policy,
				       NULL);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	if (1) {
		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
		struct nlattr **data = NULL;
		struct nlattr **slave_data = NULL;
		struct net *dest_net, *link_net = NULL;

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
						       ops->policy, NULL);
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
				err = ops->validate(tb, data, extack);
				if (err < 0)
					return err;
			}
		}

		if (m_ops) {
			if (m_ops->slave_maxtype &&
			    linkinfo[IFLA_INFO_SLAVE_DATA]) {
				err = nla_parse_nested(slave_attr,
						       m_ops->slave_maxtype,
						       linkinfo[IFLA_INFO_SLAVE_DATA],
						       m_ops->slave_policy,
						       NULL);
				if (err < 0)
					return err;
				slave_data = slave_attr;
			}
		}

		if (dev) {
			int status = 0;

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

				err = ops->changelink(dev, tb, data, extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
				if (!m_ops || !m_ops->slave_changelink)
					return -EOPNOTSUPP;

				err = m_ops->slave_changelink(master_dev, dev,
							      tb, slave_data,
							      extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			return do_setlink(skb, dev, ifm, extack, tb, ifname,
					  status);
		}

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
				return rtnl_group_changelink(skb, net,
						nla_get_u32(tb[IFLA_GROUP]),
						ifm, extack, tb);
			return -ENODEV;
		}

		if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
			return -EOPNOTSUPP;

		if (!ops) {
#ifdef CONFIG_MODULES
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

		if (!ops->setup)
			return -EOPNOTSUPP;

		if (!ifname[0]) {
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
			name_assign_type = NET_NAME_ENUM;
		}

		dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
		if (IS_ERR(dest_net))
			return PTR_ERR(dest_net);

		if (tb[IFLA_LINK_NETNSID]) {
			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

			link_net = get_net_ns_by_id(dest_net, id);
			if (!link_net) {
				err = -EINVAL;
				goto out;
			}
			err = -EPERM;
			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
				goto out;
		}

		dev = rtnl_create_link(link_net ? : dest_net, ifname,
				       name_assign_type, ops, tb);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out;
		}

		dev->ifindex = ifm->ifi_index;

		if (ops->newlink) {
			err = ops->newlink(link_net ? : net, dev, tb, data,
					   extack);
			/* Drivers should call free_netdev() in ->destructor
			 * and unregister it on failure after registration
			 * so that the device can finally be freed in
			 * rtnl_unlock().
			 */
			if (err < 0) {
				/* If device is not registered at all, free it now */
				if (dev->reg_state == NETREG_UNINITIALIZED)
					free_netdev(dev);
				goto out;
			}
		} else {
			err = register_netdevice(dev);
			if (err < 0) {
				free_netdev(dev);
				goto out;
			}
		}
		err = rtnl_configure_link(dev, ifm);
		if (err < 0)
			goto out_unregister;
		if (link_net) {
			err = dev_change_net_namespace(dev, dest_net, ifname);
			if (err < 0)
				goto out_unregister;
		}
		if (tb[IFLA_MASTER]) {
			err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
					    extack);
			if (err)
				goto out_unregister;
		}
out:
		if (link_net)
			put_net(link_net);
		put_net(dest_net);
		return err;
out_unregister:
		if (ops->newlink) {
			LIST_HEAD(list_kill);

			ops->dellink(dev, &list_kill);
			unregister_netdevice_many(&list_kill);
		} else {
			unregister_netdevice(dev);
		}
		goto out;
	}
}
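
/*
 * Illustrative note (not part of this file): the request_module() call in
 * rtnl_newlink() resolves IFLA_INFO_KIND through module aliases, so a link
 * driver only needs to declare one to be auto-loaded on first use, e.g.:
 *
 *	MODULE_ALIAS_RTNL_LINK("dummy");	// from drivers/net/dummy.c
 *
 * which expands to MODULE_ALIAS("rtnl-link-dummy") and lets
 * "ip link add type dummy" load the module on demand.
 */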

static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int netnsid = -1;
	int err;
	u32 ext_filter_mask = 0;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_IF_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(tgt_net, ifname);
	else
		goto out;

	err = -ENODEV;
	if (dev == NULL)
		goto out;

	err = -ENOBUFS;
	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
	if (nskb == NULL)
		goto out;

	err = rtnl_fill_ifinfo(nskb, dev, net,
			       RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
			       0, NULL, 0, netnsid);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	u16 min_ifinfo_dump_size = 0;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
							   ext_filter_mask));
	}
	rcu_read_unlock();

	return nlmsg_total_size(min_ifinfo_dump_size);
}

static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		struct rtnl_link **tab;
		int type = cb->nlh->nlmsg_type-RTM_BASE;
		struct rtnl_link *link;
		rtnl_dumpit_func dumpit;

		if (idx < s_idx || idx == PF_PACKET)
			continue;

		if (type < 0 || type >= RTM_NR_MSGTYPES)
			continue;

		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
		if (!tab)
			continue;

		link = tab[type];
		if (!link)
			continue;

		dumpit = link->dumpit;
		if (!dumpit)
			continue;

		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		if (dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}

struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change,
				       u32 event, gfp_t flags, int *new_nsid,
				       int new_ifindex)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	size_t if_info_size;

	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
			       type, 0, 0, change, 0, 0, event,
			       new_nsid, new_ifindex, -1);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}

void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}

static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
			       gfp_t flags, int *new_nsid, int new_ifindex)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
				     new_ifindex);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   NULL, 0);
}

void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid, int new_ifindex)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   new_nsid, new_ifindex);
}

static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = flags;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state = ndm_state;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
}

static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
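
/*
 * Illustrative sketch (not part of this file): the notifications built
 * above go to the RTNLGRP_NEIGH multicast group, so a hypothetical
 * userspace monitor only needs to subscribe at bind time:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_NEIGH,	// legacy bitmask group
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *	// recv() now yields RTM_NEWNEIGH/RTM_DELNEIGH messages carrying
 *	// an ndmsg header plus NDA_LLADDR/NDA_VLAN attributes.
 */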

/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 * @ndm: neighbour discovery message header
 * @tb: parsed NDA_* attributes
 * @dev: netdevice whose address list is updated
 * @addr: link-layer address to add
 * @vid: VLAN id (not supported by this default handler, must be 0)
 * @flags: NLM_F_* flags from the request
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);

static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
			 struct netlink_ext_ack *extack)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);

		if (!vid || vid >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG(extack, "invalid vlan id");
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}

static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	u8 *addr;
	u16 vid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		NL_SET_ERR_MSG(extack, "invalid address");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device; this is the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags);
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
							   vid,
							   nlh->nlmsg_flags);
		else
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
					       nlh->nlmsg_flags);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
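
/*
 * Illustrative note (not part of this file): NTF_MASTER routes the
 * request to the bridge above the port, NTF_SELF to the device itself;
 * iproute2 exposes the same split, e.g.:
 *
 *	bridge fdb add 52:54:00:12:34:56 dev eth0 master	# bridge FDB
 *	bridge fdb add 52:54:00:12:34:56 dev eth0 self		# device FDB
 *
 * With neither flag set, the master path above is the default.
 */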

/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 * @ndm: neighbour discovery message header
 * @tb: parsed NDA_* attributes
 * @dev: netdevice whose address list is updated
 * @addr: link-layer address to delete
 * @vid: VLAN id (ignored by this default handler)
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);

static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	int err = -EINVAL;
	__u8 *addr;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		NL_SET_ERR_MSG(extack, "invalid address");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device; this is the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		if (ops->ndo_fdb_del)
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
		else
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}

static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}
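
/*
 * Illustrative note (not part of this file): *idx above counts every
 * entry walked, while cb->args[2] remembers how many were already emitted
 * on the previous dump pass, so a resumed dump skips exactly the
 * addresses that fit into the earlier skb and continues with the first
 * one that did not.
 */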

/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer the dump is filled into
 * @cb: netlink callback holding the dump state
 * @dev: netdevice
 * @filter_dev: unused here; present to match the ndo_fdb_dump signature
 * @idx: entry counter, advanced as addresses are walked
 *
 * Default netdevice operation to dump the existing unicast and multicast
 * address lists. Returns 0 on success, or a negative error if the skb
 * ran out of room.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);

static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
			  IFLA_MAX, ifla_policy, NULL);
	if (err < 0) {
		return -EINVAL;
	} else if (err == 0) {
		if (tb[IFLA_MASTER])
			br_idx = nla_get_u32(tb[IFLA_MASTER]);
	}

	brport_idx = ifm->ifi_index;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								 br_dev, dev,
								 &fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for the rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}

static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}

int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
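
/*
 * Illustrative note (not part of this file): the message built above
 * nests as
 *
 *	RTM_NEWLINK (ifinfomsg, ifi_family = AF_BRIDGE)
 *	  IFLA_IFNAME, IFLA_MTU, IFLA_OPERSTATE, ...
 *	  IFLA_AF_SPEC
 *	    IFLA_BRIDGE_FLAGS (BRIDGE_FLAGS_SELF)
 *	    IFLA_BRIDGE_MODE  (optional)
 *	    vlan_fill() attributes (optional)
 *	  IFLA_PROTINFO (NLA_F_NESTED)
 *	    IFLA_BRPORT_* u8 flags, one per bit set in @mask
 */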

static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}
	}
	err = skb->len;
out_err:
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}

static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
}

static int rtnl_bridge_notify(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
	if (err < 0)
		goto errout;

	if (!skb->len)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;
errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return err;
}
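
/*
 * Illustrative note (not part of this file): rtnl_bridge_setlink() and
 * rtnl_bridge_dellink() below dispatch on IFLA_BRIDGE_FLAGS the same way
 * the fdb paths dispatch on NTF_*: BRIDGE_FLAGS_MASTER (or no flag) goes
 * to the master bridge's ndo_bridge_setlink/dellink, BRIDGE_FLAGS_SELF to
 * the port device itself, and both may be handled for a single request.
 */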

static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}

static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);

		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}

static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}
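
/*
 * Illustrative note (not part of this file): RTM_GETSTATS callers select
 * attribute groups by setting IFLA_STATS_FILTER_BIT(attr) in
 * if_stats_msg::filter_mask, e.g. to request only the 64-bit link
 * counters and the af-specific stats:
 *
 *	__u32 filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64) |
 *			    IFLA_STATS_FILTER_BIT(IFLA_STATS_AF_SPEC);
 *
 * stats_attr_valid() above then gates each nest on that mask (and on the
 * resume point when a dump is split across messages).
 */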

#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
static int rtnl_get_offload_stats_attr_size(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return sizeof(struct rtnl_link_stats64);
	}

	return 0;
}

static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;

		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	*prividx = attr_id;
	return err;
}

static int rtnl_get_offload_stats_size(const struct net_device *dev)
{
	int nla_size = 0;
	int attr_id;
	int size;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return 0;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;
		size = rtnl_get_offload_stats_attr_size(attr_id);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}

static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
		if (!attr)
			goto nla_put_failure;

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;
				int err;

				af = nla_nest_start(skb, af_ops->family);
				if (!af) {
					rcu_read_unlock();
					goto nla_put_failure;
				}
				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA) {
					nla_nest_cancel(skb, af);
				} else if (err < 0) {
					rcu_read_unlock();
					goto nla_put_failure;
				}

				nla_nest_end(skb, af);
			}
		}
		rcu_read_unlock();

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress means a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
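/* Worst-case size of the message rtnl_fill_statsinfo() builds.  This must
 * be an upper bound for every attribute the fill side can emit: the unicast
 * path allocates its skb from this estimate, so an undersized result shows
 * up as the -EMSGSIZE WARN_ON() in rtnl_stats_get() below.
 */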
static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  u32 filter_mask)
{
	size_t size = 0;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
		size += rtnl_get_offload_stats_size(dev);

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
		rcu_read_unlock();
	}

	return size;
}

static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

	if (nlmsg_len(nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, filter_mask, &idxattr, &prividx);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}

static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
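/* Example RTM_GETSTATS request as seen from user space (a sketch; the
 * ifindex and mask values are illustrative only):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct if_stats_msg ifsm;
 *	} req = {
 *		.nlh.nlmsg_len	  = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
 *		.nlh.nlmsg_type	  = RTM_GETSTATS,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST,
 *		.ifsm.ifindex	  = 2,
 *		.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 *
 * rtnl_stats_get() above answers this with a single RTM_NEWSTATS message;
 * the same header sent with NLM_F_DUMP set instead reaches rtnl_stats_dump()
 * and walks every device in the namespace.
 */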
/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int kind;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = type & 3;

	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u16 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
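/* The kind arithmetic above relies on the rtnetlink numbering convention:
 * message types come in groups of four (NEW, DEL, GET, SET), so type & 3
 * yields 0/1/2/3 respectively.  RTM_NEWLINK..RTM_SETLINK is one such group.
 * Only GET requests (kind == 2) are allowed without CAP_NET_ADMIN.
 */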
static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
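/* Boot-time wiring: the per-netns NETLINK_ROUTE socket is created first
 * (every namespace gets one via the pernet subsys, and a failure there is
 * fatal), then the netdevice notifier that turns NETDEV_* events into
 * RTM_NEWLINK notifications, and finally the built-in message handlers.
 * rtnl_register() is the built-in counterpart of rtnl_register_module(),
 * for handlers that can never be unloaded.
 */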
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
}
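/* The PF_UNSPEC entries registered above double as fallbacks: when a
 * family has no handler table of its own, rtnl_get_link() retries the
 * lookup under PF_UNSPEC, which is how per-family RTM_GETADDR/RTM_GETROUTE/
 * RTM_GETNETCONF dump requests end up in rtnl_dump_all().
 */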