// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

/* Caller holds RTNL, netdev->lock or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
	return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}

/* There is a possible ABBA deadlock between rtnl_lock and kernfs_node->active,
 * when unregistering a net device and accessing associated sysfs files. The
 * potential deadlock is as follows:
 *
 *         CPU 0                                         CPU 1
 *
 *    rtnl_lock                                   vfs_read
 *    unregister_netdevice_many                   kernfs_seq_start
 *    device_del / kobject_put                      kernfs_get_active (kn->active++)
 *    kernfs_drain                                sysfs_kf_seq_show
 *      wait_event(                                 rtnl_lock
 *         kn->active == KN_DEACTIVATED_BIAS)         -> waits on CPU 0 to release
 *      -> waits on CPU 1 to decrease kn->active         the rtnl lock.
 *
 * The historical fix was to use rtnl_trylock with restart_syscall to bail out
 * of sysfs operations when the lock couldn't be taken. This fixed the above
 * issue as it allowed CPU 1 to bail out of the ABBA situation.
 *
 * But it came with performance issues, as syscalls are being restarted in
 * loops when there was contention on the rtnl lock, with huge slowdowns in
 * specific scenarios (e.g. lots of virtual interfaces created and userspace
 * daemons querying their attributes).
 *
 * The idea below is to bail out of the active kernfs_node protection
 * (kn->active) while trying to take the rtnl lock.
 *
 * This replaces rtnl_lock() and still has to be used with rtnl_unlock(). The
 * net device is guaranteed to be alive if this returns successfully.
 */
static int sysfs_rtnl_lock(struct kobject *kobj, struct attribute *attr,
			   struct net_device *ndev)
{
	struct kernfs_node *kn;
	int ret = 0;

	/* First, we hold a reference to the net device as the unregistration
	 * path might run in parallel. This will ensure the net device and the
	 * associated sysfs objects won't be freed while we try to take the rtnl
	 * lock.
	 */
	dev_hold(ndev);
	/* sysfs_break_active_protection was introduced to allow self-removal of
	 * devices and their associated sysfs files by bailing out of the
	 * sysfs/kernfs protection. We do this here to allow the unregistration
	 * path to complete in parallel. The following takes a reference on the
	 * kobject and the kernfs_node being accessed.
	 *
	 * This works because we hold a reference onto the net device and the
	 * unregistration path will wait for us eventually in netdev_run_todo
	 * (outside an rtnl lock section).
	 */
	kn = sysfs_break_active_protection(kobj, attr);
	/* We can now try to take the rtnl lock. This can't deadlock us as the
	 * unregistration path is able to drain sysfs files (kernfs_node) thanks
	 * to the above dance.
	 */
	if (rtnl_lock_interruptible()) {
		ret = -ERESTARTSYS;
		goto unbreak;
	}
	/* Check that the device dismantle hasn't started, otherwise deny the
	 * operation.
	 */
	if (!dev_isalive(ndev)) {
		rtnl_unlock();
		ret = -ENODEV;
		goto unbreak;
	}
	/* We are now sure the device dismantle hasn't started nor that it can
	 * start before we exit the locking section as we hold the rtnl lock.
	 * There's no need to keep unbreaking the sysfs protection nor to hold
	 * a net device reference from that point; that was only needed to take
	 * the rtnl lock.
	 */
unbreak:
	sysfs_unbreak_active_protection(kn);
	dev_put(ndev);

	return ret;
}
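
/* A minimal usage sketch of the pairing described above, assuming a
 * hypothetical read-only attribute named "example" (the real users in this
 * file are carrier_show(), speed_show(), duplex_show(), etc.):
 *
 *	static ssize_t example_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct net_device *netdev = to_net_dev(dev);
 *		int ret;
 *
 *		ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
 *		if (ret)
 *			return ret;
 *
 *		ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->mtu));
 *
 *		rtnl_unlock();
 *		return ret;
 *	}
 */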

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	rcu_read_unlock();

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sysfs_emit(buf, format_string, READ_ONCE(dev->field));	\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		goto err;

	ret = (*set)(netdev, new);
	if (ret == 0)
		ret = len;

	rtnl_unlock();
err:
	return ret;
}

/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */
static ssize_t
netdev_lock_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t len,
		  int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;

	netdev_lock(netdev);

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	netdev_unlock(netdev);

	return ret;
}
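
/* For reference, a sketch of what NETDEVICE_SHOW_RW(mtu, fmt_dec) below
 * expands to (preprocessor output, lightly reformatted; fmt_dec is "%d\n"):
 *
 *	static ssize_t format_mtu(const struct net_device *dev, char *buf)
 *	{
 *		return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->mtu));
 *	}
 *	static ssize_t mtu_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *	static DEVICE_ATTR_RW(mtu);
 */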

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	down_read(&dev_addr_sem);

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	rcu_read_unlock();

	up_read(&dev_addr_sem);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	int ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	rcu_read_unlock();
	return ret;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the locking section in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		/* Synchronize carrier state with link watch,
		 * see also rtnl_getlink().
		 */
		linkwatch_sync_dev(netdev);

		ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}

	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sysfs_emit(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sysfs_emit(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	operstate = READ_ONCE(netdev->operstate);
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sysfs_emit(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_up_count) +
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	netdev_set_gro_flush_timeout(dev, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	if (val > S32_MAX)
		return -ERANGE;

	netdev_set_defer_hard_irqs(dev, (u32)val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len,
				 change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_set_alias(netdev, buf, count);
	if (ret < 0)
		goto err;
	ret = len;
	netdev_state_change(netdev);
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sysfs_emit(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid;
	ssize_t ret;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the locking section below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_id(netdev, &ppid);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	char name[IFNAMSIZ];
	ssize_t ret;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_name(netdev, name, sizeof(name));
	if (!ret)
		ret = sysfs_emit(buf, "%s\n", name);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid = { };
	ssize_t ret;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the locking section below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_port_parent_id(netdev, &ppid, false);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();

	if (dev_isalive(netdev))
		ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded));

	rcu_read_unlock();

	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_lock_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	rcu_read_lock();
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	rcu_read_unlock();
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};

static bool wireless_group_needed(struct net_device *ndev)
{
#if IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		return true;
#endif
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	if (ndev->wireless_handlers)
		return true;
#endif
	return false;
}

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = sysfs_emit(buf, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue,
					cpumask_var_t mask)
{
	static DEFINE_MUTEX(rps_map_mutex);
	struct rps_map *old_map, *map;
	int cpu, i;

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);
	return 0;
}

int rps_cpumask_housekeeping(struct cpumask *mask)
{
	if (!cpumask_empty(mask)) {
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
		if (cpumask_empty(mask))
			return -EINVAL;
	}
	return 0;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err)
		goto out;

	err = rps_cpumask_housekeeping(mask);
	if (err)
		goto out;

	err = netdev_rx_queue_set_rps_mask(queue, mask);

out:
	free_cpumask_var(mask);
	return err ? : len;
}
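
/* A minimal userspace sketch of driving store_rps_map() above, assuming an
 * interface named "eth0": bitmap_parse() takes a hex CPU mask, so "f" steers
 * rx-0 to CPUs 0-3. Requires CAP_NET_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/class/net/eth0/queues/rx-0/rps_cpus",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "f", 1);
 *		close(fd);
 *		return 0;
 *	}
 */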

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sysfs_emit(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
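
/* Worked example of the rounding in store_rps_dev_flow_table_cnt(): writing
 * 100 gives mask = 99 (0b1100011); the bit-smearing loop widens it to 127
 * (0b1111111), so a 128-entry flow table is allocated. Requested counts are
 * effectively rounded up to the next power of two.
 */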

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *rx_queue_namespace(const struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->namespace)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(const struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_default_mask(struct net_device *dev,
				 struct netdev_rx_queue *queue)
{
#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
	struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);

	if (rps_default_mask && !cpumask_empty(rps_default_mask))
		return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
#endif
	return 0;
}

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Rx queues are cleared in rx_queue_release to allow later
	 * re-registration. This is triggered when their kobj refcount is
	 * dropped.
	 *
	 * If a queue is removed while both a read (or write) operation and the
	 * re-addition of the same queue are pending (waiting on rtnl_lock), the
	 * re-addition may execute before the read, so that the initial removal
	 * never completes (the queue's kobj refcount won't drop enough because
	 * of the pending read). In such a rare case, return to allow the
	 * removal operation to complete.
	 */
	if (unlikely(kobj->state_initialized)) {
		netdev_warn_once(dev, "Cannot re-add rx queues before their removal completed");
		return -EAGAIN;
	}

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	queue->groups = rx_queue_default_groups;
	error = sysfs_create_groups(kobj, queue->groups);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err_default_groups;
	}

	error = rx_queue_default_mask(dev, queue);
	if (error)
		goto err_default_groups;

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err_default_groups:
	sysfs_remove_groups(kobj, queue->groups);
err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_rx_queue *queue = &dev->_rx[i];
		struct kobject *kobj = &queue->kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		sysfs_remove_groups(kobj, queue->groups);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 struct netdev_queue *queue, const char *buf,
			 size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(kobj, attr, queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(kobj, attr, queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct kobject *kobj, struct attribute *attr,
			       struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);

	return sysfs_emit(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc, index, ret;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	ret = sysfs_rtnl_lock(kobj, attr, queue->dev);
	if (ret)
		return ret;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sysfs_emit(buf, "%d%d\n", tc, num_tc) :
			    sysfs_emit(buf, "%d\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct kobject *kobj, struct attribute *attr,
			       struct netdev_queue *queue, char *buf)
{
	return sysfs_emit(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct kobject *kobj, struct attribute *attr,
				struct netdev_queue *queue, const char *buf,
				size_t len)
{
	int err, index = get_netdev_queue_index(queue);
	struct net_device *dev = queue->dev;
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* The check is also done later; this helps returning early without
	 * hitting the locking section below.
	 */
	if (!dev->netdev_ops->ndo_set_tx_maxrate)
		return -EOPNOTSUPP;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	err = sysfs_rtnl_lock(kobj, attr, dev);
	if (err)
		return err;

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	if (!err) {
		queue->tx_maxrate = rate;
		rtnl_unlock();
		return len;
	}

	rtnl_unlock();
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, const char *buf,
				 size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_stall_thrs(struct kobject *kobj, struct attribute *attr,
				   struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
}

static ssize_t bql_set_stall_thrs(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, const char *buf,
				  size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	value = msecs_to_jiffies(value);
	if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
		return -ERANGE;

	if (!dql->stall_thrs && value)
		dql->last_reap = jiffies;
	/* Force last_reap to be live */
	smp_wmb();
	dql->stall_thrs = value;

	return len;
}

static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
	__ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);

static ssize_t bql_show_stall_max(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
}

static ssize_t bql_set_stall_max(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, const char *buf,
				 size_t len)
{
	WRITE_ONCE(queue->dql.stall_max, 0);
	return len;
}

static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
	__ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);

static ssize_t bql_show_stall_cnt(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%lu\n", dql->stall_cnt);
}

static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
	__ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);

static ssize_t bql_show_inflight(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct kobject *kobj,			\
				 struct attribute *attr,		\
				 struct netdev_queue *queue, char *buf)	\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct kobject *kobj,			\
				struct attribute *attr,			\
				struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	&bql_stall_thrs_attribute.attr,
	&bql_stall_cnt_attribute.attr,
	&bql_stall_max_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#else
/* Fake declaration, all the code using it should be dead */
static const struct attribute_group dql_group = {};
#endif /* CONFIG_BQL */
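
/* A minimal userspace sketch of the BQL knobs above, assuming an interface
 * named "eth0" and an arbitrary 256 KB cap: bql_set() accepts either a byte
 * count or the literal "max" (DQL_MAX_LIMIT). Requires CAP_NET_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path =
 *			"/sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max";
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "262144", 6);	// or "max" for DQL_MAX_LIMIT
 *		close(fd);
 *		return 0;
 *	}
 */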

#ifdef CONFIG_XPS
static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
			      int tc, char *buf, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int nr_ids;
	int j, len;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps[type]);

	/* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0
	 * when dev_maps hasn't been allocated yet, to be backward compatible.
	 */
	nr_ids = dev_maps ? dev_maps->nr_ids :
		 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);

	mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
	if (!mask) {
		rcu_read_unlock();
		return -ENOMEM;
	}

	if (!dev_maps || tc >= dev_maps->num_tc)
		goto out_no_maps;

	for (j = 0; j < nr_ids; j++) {
		int i, tci = j * dev_maps->num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				__set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t xps_cpus_show(struct kobject *kobj, struct attribute *attr,
			     struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int len, tc, ret;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	ret = sysfs_rtnl_lock(kobj, attr, queue->dev);
	if (ret)
		return ret;

	/* If queue belongs to subordinate dev use its map */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0) {
		rtnl_unlock();
		return -EINVAL;
	}

	/* Increase the net device refcnt to make sure it won't be freed while
	 * xps_queue_show is running.
	 */
	dev_hold(dev);
	rtnl_unlock();

	len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);

	dev_put(dev);
	return len;
}

static ssize_t xps_cpus_store(struct kobject *kobj, struct attribute *attr,
			      struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = sysfs_rtnl_lock(kobj, attr, dev);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);
	rtnl_unlock();

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);
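
/* A minimal userspace sketch of driving xps_cpus_store() above, assuming an
 * interface named "eth0": the Tx-side mirror of rps_cpus, so the hex mask
 * "f0" pins tx-1 to CPUs 4-7. Requires CAP_NET_ADMIN and a multiqueue device.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/class/net/eth0/queues/tx-1/xps_cpus",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "f0", 2);
 *		close(fd);
 *		return 0;
 *	}
 */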

static ssize_t xps_rxqs_show(struct kobject *kobj, struct attribute *attr,
			     struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	unsigned int index;
	int tc, ret;

	index = get_netdev_queue_index(queue);

	ret = sysfs_rtnl_lock(kobj, attr, dev);
	if (ret)
		return ret;

	tc = netdev_txq_to_tc(dev, index);

	/* Increase the net device refcnt to make sure it won't be freed while
	 * xps_queue_show is running.
	 */
	dev_hold(dev);
	rtnl_unlock();

	ret = tc >= 0 ? xps_queue_show(dev, index, tc, buf, XPS_RXQS) : -EINVAL;
	dev_put(dev);
	return ret;
}

static ssize_t xps_rxqs_store(struct kobject *kobj, struct attribute *attr,
			      struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask;
	unsigned int index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	err = sysfs_rtnl_lock(kobj, attr, dev);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
	cpus_read_unlock();

	rtnl_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *netdev_queue_namespace(const struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->namespace)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(const struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static bool netdev_uses_bql(const struct net_device *dev)
{
	if (dev->lltx || (dev->priv_flags & IFF_NO_QUEUE))
		return false;

	return IS_ENABLED(CONFIG_BQL);
}

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Tx queues are cleared in netdev_queue_release to allow later
	 * re-registration. This is triggered when their kobj refcount is
	 * dropped.
	 *
	 * If a queue is removed while both a read (or write) operation and the
	 * re-addition of the same queue are pending (waiting on rtnl_lock), the
	 * re-addition may execute before the read, so that the initial removal
	 * never completes (the queue's kobj refcount won't drop enough because
	 * of the pending read). In such a rare case, return to allow the
	 * removal operation to complete.
	 */
	if (unlikely(kobj->state_initialized)) {
		netdev_warn_once(dev, "Cannot re-add tx queues before their removal completed");
		return -EAGAIN;
	}

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

	queue->groups = netdev_queue_default_groups;
	error = sysfs_create_groups(kobj, queue->groups);
	if (error)
		goto err;

	if (netdev_uses_bql(dev)) {
		error = sysfs_create_group(kobj, &dql_group);
		if (error)
			goto err_default_groups;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err_default_groups:
	sysfs_remove_groups(kobj, queue->groups);
err:
	kobject_put(kobj);
	return error;
}

static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (netdev_uses_bql(ndev))
		error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	/* Tx queue kobjects are allowed to be updated when a device is being
	 * unregistered, but solely to remove queues from qdiscs. Any path
	 * adding queues should be fixed.
	 */
	WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num,
	     "New queues can't be registered after device unregistration.");

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;

		if (netdev_uses_bql(dev))
			sysfs_remove_group(&queue->kobj, &dql_group);

		sysfs_remove_groups(&queue->kobj, queue->groups);
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);

	dev->real_num_rx_queues = 0;
	dev->real_num_tx_queues = 0;
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}
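/* Example (illustrative, not part of the build): because the class is
 * tagged KOBJ_NS_TYPE_NET through the operations assembled below, sysfs
 * shows only the devices of the caller's network namespace. With a
 * hypothetical veth pair whose peer was moved into a namespace "blue":
 *
 *	$ ls /sys/class/net
 *	eth0  lo
 *	$ ip netns exec blue ls /sys/class/net
 *	lo  veth0
 */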
static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env)
{
	const struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	kvfree(dev);
}

static const void *net_namespace(const struct device *d)
{
	const struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
{
	const struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static const struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding to the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif
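/* Example (illustrative sketch, not part of the build): with CONFIG_OF set,
 * a hypothetical caller that has resolved an ethernet-controller phandle
 * "np" from the device tree could use the helper above like so:
 *
 *	struct net_device *ndev = of_find_net_device_by_node(np);
 *
 *	if (!ndev)
 *		return -EPROBE_DEFER;
 *	...
 *	put_device(&ndev->dev);		drops the lookup's device reference
 */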
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for a network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device-specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

	if (wireless_group_needed(ndev))
		*groups++ = &wireless_group;
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
	struct device *dev = &ndev->dev;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical, so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}